From 96829b8ac477b7e734888939fcd3fa423734c43a Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 9 Jun 2023 13:07:33 +0000 Subject: [PATCH 001/543] lxd/storage/zfs/utils: Add helper function to get multiple dataset properties Signed-off-by: Din Music --- lxd/storage/drivers/driver_zfs_utils.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/lxd/storage/drivers/driver_zfs_utils.go b/lxd/storage/drivers/driver_zfs_utils.go index 41c6927639d1..f48ff7dd940c 100644 --- a/lxd/storage/drivers/driver_zfs_utils.go +++ b/lxd/storage/drivers/driver_zfs_utils.go @@ -252,6 +252,29 @@ func (d *zfs) getDatasetProperty(dataset string, key string) (string, error) { return strings.TrimSpace(output), nil } +func (d *zfs) getDatasetProperties(dataset string, keys ...string) (map[string]string, error) { + output, err := shared.RunCommand("zfs", "get", "-H", "-p", "-o", "property,value", strings.Join(keys, ","), dataset) + if err != nil { + return nil, err + } + + props := make(map[string]string, len(keys)) + + for _, row := range strings.Split(output, "\n") { + prop := strings.Split(row, "\t") + + if len(prop) < 2 { + continue + } + + key := prop[0] + val := prop[1] + props[key] = val + } + + return props, nil +} + // version returns the ZFS version based on package or kernel module version. 
func (d *zfs) version() (string, error) { // This function is only really ever relevant on Ubuntu as the only From a56e5c55d00e26b5c4680d659f88d1373bee4b53 Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 9 Jun 2023 13:07:58 +0000 Subject: [PATCH 002/543] lxd/storage/zfs/volumes: Fix ZFS does not respect atime=off option Signed-off-by: Din Music --- lxd/storage/drivers/driver_zfs_volumes.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 5335d972f6f8..a310446302fc 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -2012,8 +2012,20 @@ func (d *zfs) MountVolume(vol Volume, op *operations.Operation) error { return err } + var volOptions []string + + props, _ := d.getDatasetProperties(dataset, "atime", "relatime") + + if props["atime"] == "off" { + volOptions = append(volOptions, "noatime") + } else if props["relatime"] == "off" { + volOptions = append(volOptions, "strictatime") + } + + mountFlags, mountOptions := filesystem.ResolveMountOptions(volOptions) + // Mount the dataset. 
- err = TryMount(dataset, mountPath, "zfs", 0, "") + err = TryMount(dataset, mountPath, "zfs", mountFlags, mountOptions) if err != nil { return err } From 89f1ee8a4af2ca6c88579b7aa7a382f9d08efcac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 8 Jun 2023 17:02:52 -0400 Subject: [PATCH 003/543] lxd/instance/qemu/bus: Introduce allocateDirect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu_bus.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu_bus.go b/lxd/instance/drivers/driver_qemu_bus.go index 569fa17b386a..cb5b9fccdd68 100644 --- a/lxd/instance/drivers/driver_qemu_bus.go +++ b/lxd/instance/drivers/driver_qemu_bus.go @@ -55,6 +55,16 @@ func (a *qemuBus) allocateRoot() *qemuBusEntry { // The multiFunctionGroup parameter allows for grouping devices together as one or more multi-function devices. // It automatically keeps track of the number of functions already used and will allocate new ports as needed. func (a *qemuBus) allocate(multiFunctionGroup string) (string, string, bool) { + return a.allocateInternal(multiFunctionGroup, true) +} + +// allocateDirect() works like allocate() but will directly attach the device to the root PCI bridge. +// This prevents hotplug or hotremove of the device but is sometimes required for compatibility reasons. +func (a *qemuBus) allocateDirect() (string, string, bool) { + return a.allocateInternal(busFunctionGroupNone, false) +} + +func (a *qemuBus) allocateInternal(multiFunctionGroup string, hotplug bool) (string, string, bool) { if a.name == "ccw" { return "", "", false } @@ -98,7 +108,7 @@ func (a *qemuBus) allocate(multiFunctionGroup string) (string, string, bool) { // Create a temporary single function group. 
p = &qemuBusEntry{} - if a.name == "pci" { + if a.name == "pci" || !hotplug { p.bridgeDev = a.devNum a.devNum++ } else if a.name == "pcie" { @@ -111,8 +121,8 @@ func (a *qemuBus) allocate(multiFunctionGroup string) (string, string, bool) { // The first device added to a multi-function port needs to specify the multi-function feature. multi := p.fn == 0 && multiFunctionGroup != "" - if a.name == "pci" { - return "pci.0", fmt.Sprintf("%x.%d", p.bridgeDev, p.fn), multi + if a.name == "pci" || !hotplug { + return fmt.Sprintf("%s.0", a.name), fmt.Sprintf("%x.%d", p.bridgeDev, p.fn), multi } if a.name == "pcie" { From f38f442cddac80afdac2b86780a2230f5d82de23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 8 Jun 2023 17:23:27 -0400 Subject: [PATCH 004/543] lxd/instance/qemu: Move SCSI to root bridge on CSM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 9c822fdfa7b7..a795712d875d 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -2809,7 +2809,16 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo cfg = append(cfg, qemuUSB(&usbOpts)...) } - devBus, devAddr, multi = bus.allocate(busFunctionGroupNone) + if shared.IsTrue(d.expandedConfig["security.csm"]) { + // Allocate a regular entry to keep things aligned normally (avoid NICs getting a different name). + _, _, _ = bus.allocate(busFunctionGroupNone) + + // Allocate a direct entry so the SCSI controller can be seen by seabios. 
+ devBus, devAddr, multi = bus.allocateDirect() + } else { + devBus, devAddr, multi = bus.allocate(busFunctionGroupNone) + } + scsiOpts := qemuDevOpts{ busName: bus.name, devBus: devBus, From be851908c8da3a8a88a67e968f1cbc4ff2dd743b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 8 Jun 2023 21:36:46 -0400 Subject: [PATCH 005/543] lxd/instance/qemu: Move GPU to root bridge on CSM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index a795712d875d..470bbc5e288f 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -2864,7 +2864,16 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo cfg = append(cfg, qemuDriveConfig(&driveConfigVirtioOpts)...) } - devBus, devAddr, multi = bus.allocate(busFunctionGroupNone) + if shared.IsTrue(d.expandedConfig["security.csm"]) { + // Allocate a regular entry to keep things aligned normally (avoid NICs getting a different name). + _, _, _ = bus.allocate(busFunctionGroupNone) + + // Allocate a direct entry so the GPU can be seen by seabios. 
+ devBus, devAddr, multi = bus.allocateDirect() + } else { + devBus, devAddr, multi = bus.allocate(busFunctionGroupNone) + } + gpuOpts := qemuGpuOpts{ dev: qemuDevOpts{ busName: bus.name, From b793f99e377002f4aede656ff97f19b70e1a4eac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 8 Jun 2023 21:37:23 -0400 Subject: [PATCH 006/543] lxd/apparmor/qemu: Add support for multiple OVMF builds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/apparmor/instance_qemu.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/apparmor/instance_qemu.go b/lxd/apparmor/instance_qemu.go index 3b1480d05120..689394927565 100644 --- a/lxd/apparmor/instance_qemu.go +++ b/lxd/apparmor/instance_qemu.go @@ -37,6 +37,7 @@ profile "{{ .name }}" flags=(attach_disconnected,mediate_deleted) { /sys/module/vhost/** r, /{,usr/}bin/qemu* mrix, {{ .ovmfPath }}/OVMF_CODE.fd kr, + {{ .ovmfPath }}/OVMF_CODE.*.fd kr, /usr/share/qemu/** kr, /usr/share/seabios/** kr, owner @{PROC}/@{pid}/cpuset r, From 22e429cd3f86ec3dfcec41a3df328cd268af5e28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 8 Jun 2023 23:16:22 -0400 Subject: [PATCH 007/543] lxd/instance/qemu: Support multiple OVMF firmwares MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This defines three types of OVMF firmwares: - Generic - Secure Boot - CSM For each of those, a set of suitable firmware and variable template locations are set. The sets are sorted in preference order. This logic allows LXD to know what variable template and firmware to use based on configuration (secureboot or csm) as well as to handle a mix of instances using 2MB or 4MB based firmwares. 
Closes #11515 Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu.go | 118 ++++++++++++++++++++++++---- 1 file changed, 101 insertions(+), 17 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 470bbc5e288f..5a26352bc4a2 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -101,6 +101,32 @@ const qemuBlockDevIDPrefix = "lxd_" // qemuMigrationNBDExportName is the name of the disk device export by the migration NBD server. const qemuMigrationNBDExportName = "lxd_root" +// OVMF firmwares. +type ovmfFirmware struct { + code string + vars string +} + +var ovmfGenericFirmwares = []ovmfFirmware{ + {code: "OVMF_CODE.4MB.fd", vars: "OVMF_VARS.4MB.fd"}, + {code: "OVMF_CODE.2MB.fd", vars: "OVMF_VARS.2MB.fd"}, + {code: "OVMF_CODE.fd", vars: "OVMF_VARS.fd"}, + {code: "OVMF_CODE.fd", vars: "qemu.nvram"}, +} + +var ovmfSecurebootFirmwares = []ovmfFirmware{ + {code: "OVMF_CODE.4MB.fd", vars: "OVMF_VARS.4MB.ms.fd"}, + {code: "OVMF_CODE.2MB.fd", vars: "OVMF_VARS.2MB.ms.fd"}, + {code: "OVMF_CODE.fd", vars: "OVMF_VARS.ms.fd"}, + {code: "OVMF_CODE.fd", vars: "qemu.nvram"}, +} + +var ovmfCSMFirmwares = []ovmfFirmware{ + {code: "OVMF_CODE.4MB.CSM.fd", vars: "OVMF_VARS.4MB.CSM.fd"}, + {code: "OVMF_CODE.2MB.CSM.fd", vars: "OVMF_VARS.2MB.CSM.fd"}, + {code: "OVMF_CODE.CSM.fd", vars: "OVMF_VARS.CSM.fd"}, +} + // qemuSparseUSBPorts is the amount of sparse USB ports for VMs. // 4 are reserved, and the other 4 can be used for any USB device. const qemuSparseUSBPorts = 8 @@ -1057,6 +1083,11 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { return fmt.Errorf("The image used by this instance is incompatible with secureboot. 
Please set security.secureboot=false on the instance") } + // Ensure secureboot is turned off when CSM is on + if shared.IsTrue(d.expandedConfig["security.csm"]) && shared.IsTrueOrEmpty(d.expandedConfig["security.secureboot"]) { + return fmt.Errorf("Secure boot can't be enabled while CSM is turned on. Please set security.secureboot=false on the instance") + } + // Setup a new operation if needed. if op == nil { op, err = operationlock.CreateWaitGet(d.Project().Name, d.Name(), operationlock.ActionStart, []operationlock.Action{operationlock.ActionRestart, operationlock.ActionRestore}, false, false) @@ -1798,32 +1829,61 @@ func (d *qemu) setupNvram() error { defer func() { _ = d.unmount() }() - srcOvmfFile := filepath.Join(d.ovmfPath(), "OVMF_VARS.fd") - if shared.IsTrueOrEmpty(d.expandedConfig["security.secureboot"]) { - srcOvmfFile = filepath.Join(d.ovmfPath(), "OVMF_VARS.ms.fd") + // Cleanup existing variables. + for _, firmwares := range [][]ovmfFirmware{ovmfGenericFirmwares, ovmfSecurebootFirmwares, ovmfCSMFirmwares} { + for _, firmware := range firmwares { + err := os.Remove(filepath.Join(d.Path(), firmware.vars)) + if err != nil && !os.IsNotExist(err) { + return err + } + } } - missingEFIFirmwareErr := fmt.Errorf("Required EFI firmware settings file missing %q", srcOvmfFile) - - if !shared.PathExists(srcOvmfFile) { - return missingEFIFirmwareErr + // Determine expected firmware. + firmwares := ovmfGenericFirmwares + if shared.IsTrue(d.expandedConfig["security.csm"]) { + firmwares = ovmfCSMFirmwares + } else if shared.IsTrueOrEmpty(d.expandedConfig["security.secureboot"]) { + firmwares = ovmfSecurebootFirmwares } - srcOvmfFile, err = filepath.EvalSymlinks(srcOvmfFile) - if err != nil { - return fmt.Errorf("Failed resolving EFI firmware symlink %q: %w", srcOvmfFile, err) + // Find the template file. 
+ var ovmfVarsPath string + var ovmfVarsName string + for _, firmware := range firmwares { + varsPath := filepath.Join(d.ovmfPath(), firmware.vars) + varsPath, err = filepath.EvalSymlinks(varsPath) + if err != nil { + continue + } + + if shared.PathExists(varsPath) { + ovmfVarsPath = varsPath + ovmfVarsName = firmware.vars + break + } } - if !shared.PathExists(srcOvmfFile) { - return missingEFIFirmwareErr + if ovmfVarsPath == "" { + return fmt.Errorf("Couldn't find one of the required UEFI firmware files: %+v", firmwares) } - _ = os.Remove(d.nvramPath()) - err = shared.FileCopy(srcOvmfFile, d.nvramPath()) + // Copy the template. + err = shared.FileCopy(ovmfVarsPath, filepath.Join(d.Path(), ovmfVarsName)) if err != nil { return err } + // Generate a symlink if needed. + // This is so qemu.nvram can always be assumed to be the OVMF vars file. + // The real file name is then used to determine what firmware must be selected. + if !shared.PathExists(d.nvramPath()) { + err = os.Symlink(ovmfVarsName, d.nvramPath()) + if err != nil { + return err + } + } + return nil } @@ -2705,8 +2765,28 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo return "", nil, fmt.Errorf("Failed opening NVRAM file: %w", err) } + // Determine expected firmware. 
+ firmwares := ovmfGenericFirmwares + if shared.IsTrue(d.expandedConfig["security.csm"]) { + firmwares = ovmfCSMFirmwares + } else if shared.IsTrueOrEmpty(d.expandedConfig["security.secureboot"]) { + firmwares = ovmfSecurebootFirmwares + } + + var ovmfCode string + for _, firmware := range firmwares { + if shared.PathExists(filepath.Join(d.Path(), firmware.vars)) { + ovmfCode = firmware.code + break + } + } + + if ovmfCode == "" { + return "", nil, fmt.Errorf("Unable to locate matching firmware: %+v", firmwares) + } + driveFirmwareOpts := qemuDriveFirmwareOpts{ - roPath: filepath.Join(d.ovmfPath(), "OVMF_CODE.fd"), + roPath: filepath.Join(d.ovmfPath(), ovmfCode), nvramPath: fmt.Sprintf("/dev/fd/%d", d.addFileDescriptor(fdFiles, nvRAMFile)), } @@ -4840,8 +4920,9 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { "cluster.evacuate", "limits.memory", "security.agent.metrics", - "security.secureboot", + "security.csm", "security.devlxd", + "security.secureboot", } isLiveUpdatable := func(key string) bool { @@ -4926,6 +5007,9 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { return fmt.Errorf("Failed updating memory limit: %w", err) } } + } else if key == "security.csm" { + // Defer rebuilding nvram until next start. + d.localConfig["volatile.apply_nvram"] = "true" } else if key == "security.secureboot" { // Defer rebuilding nvram until next start. d.localConfig["volatile.apply_nvram"] = "true" @@ -4954,7 +5038,7 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { } } - if d.architectureSupportsUEFI(d.architecture) && shared.StringInSlice("security.secureboot", changedConfig) { + if d.architectureSupportsUEFI(d.architecture) && (shared.StringInSlice("security.secureboot", changedConfig) || shared.StringInSlice("security.csm", changedConfig)) { // Re-generate the NVRAM. 
err = d.setupNvram() if err != nil { return err } From 467d32a86a8d40e3072278ba51547a23752ecd0d Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 9 Jun 2023 12:50:10 -0400 Subject: [PATCH 008/543] lxd/endpoints: make sure to not access past the end of the slice Signed-off-by: Simon Deziel --- lxd/endpoints/network_util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/endpoints/network_util.go b/lxd/endpoints/network_util.go index 6f6532eeeb80..55993eb8ffd4 100644 --- a/lxd/endpoints/network_util.go +++ b/lxd/endpoints/network_util.go @@ -27,7 +27,7 @@ func (d networkServerErrorLogWriter) Write(p []byte) (int, error) { func (d networkServerErrorLogWriter) stripLog(p []byte) string { // Strip the beginning of the log until we reach "http:". - for string(p[0:5]) != "http:" && len(p) > 0 { + for len(p) > 5 && string(p[0:5]) != "http:" { p = bytes.TrimLeftFunc(p, func(r rune) bool { return r != 'h' }) From c40bac0563b2609775baef63d529fa525695aca5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Sat, 10 Jun 2023 22:43:58 -0400 Subject: [PATCH 009/543] lxd/apparmor/archive: Fix snap handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/apparmor/archive.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/apparmor/archive.go b/lxd/apparmor/archive.go index 8eed3c386b9a..5c3a9f6b38b4 100644 --- a/lxd/apparmor/archive.go +++ b/lxd/apparmor/archive.go @@ -130,6 +130,7 @@ func archiveProfile(outputPath string, allowedCommandPaths []string) (string, er "backupsPath": backupsPath, "imagesPath": imagesPath, "allowedCommandPaths": derefCommandPaths, + "snap": shared.InSnap(), }) if err != nil { return "", err From 5e1b9a6d5d66991542a7f2d5f5deb59ba6e18431 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Sat, 10 Jun 2023 23:23:29 -0400 Subject: [PATCH 010/543] lxc/remote: Fix rename of global remotes MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxc/remote.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lxc/remote.go b/lxc/remote.go index 5586650fffca..75076328da17 100644 --- a/lxc/remote.go +++ b/lxc/remote.go @@ -786,8 +786,6 @@ func (c *cmdRemoteRename) Run(cmd *cobra.Command, args []string) error { if err != nil { return err } - - rc.Global = false } else { err := os.Rename(oldPath, newPath) if err != nil { @@ -796,6 +794,7 @@ func (c *cmdRemoteRename) Run(cmd *cobra.Command, args []string) error { } } + rc.Global = false conf.Remotes[args[1]] = rc delete(conf.Remotes, args[0]) From 2790d94728638dd41da6f807ca04b99325ecd548 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 14:26:45 -0400 Subject: [PATCH 011/543] lxd/main/forkproxy: use %v consistently when printing errors Signed-off-by: Simon Deziel --- lxd/main_forkproxy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/main_forkproxy.go b/lxd/main_forkproxy.go index 95184a6ca12a..30ba4a9d0d33 100644 --- a/lxd/main_forkproxy.go +++ b/lxd/main_forkproxy.go @@ -327,7 +327,7 @@ func listenerInstance(epFd C.int, lAddr *deviceConfig.ProxyAddress, cAddr *devic go func() { srcConn, err := net.FileConn((*lStruct).f) if err != nil { - fmt.Printf("Warning: Failed to re-assemble listener: %s\n", err) + fmt.Printf("Warning: Failed to re-assemble listener: %v\n", err) rearmUDPFd(epFd, connFd) return } @@ -685,7 +685,7 @@ func (c *cmdForkproxy) Run(cmd *cobra.Command, args []string) error { err := listenerInstance(epFd, lAddr, cAddr, curFd, srcConn, args[11] == "true") if err != nil { - fmt.Printf("Warning: Failed to prepare new listener instance: %s\n", err) + fmt.Printf("Warning: Failed to prepare new listener instance: %v\n", err) } } } From 5d77db0ba2673ffde1ea8ee76c852ec592879c55 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 14:27:25 -0400 Subject: [PATCH 012/543] lxd/main/forkproxy: 
use Println() when no format specifier is used Signed-off-by: Simon Deziel --- lxd/main_forkproxy.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd/main_forkproxy.go b/lxd/main_forkproxy.go index 30ba4a9d0d33..378dcc58f355 100644 --- a/lxd/main_forkproxy.go +++ b/lxd/main_forkproxy.go @@ -305,7 +305,7 @@ func rearmUDPFd(epFd C.int, connFd C.int) { *(*C.int)(unsafe.Pointer(uintptr(unsafe.Pointer(&ev)) + unsafe.Sizeof(ev.events))) = connFd ret := C.epoll_ctl(epFd, C.EPOLL_CTL_MOD, connFd, &ev) if ret < 0 { - fmt.Printf("Error: Failed to add listener fd to epoll instance\n") + fmt.Println("Error: Failed to add listener fd to epoll instance") } } @@ -555,7 +555,7 @@ func (c *cmdForkproxy) Run(cmd *cobra.Command, args []string) error { } if f == nil { - fmt.Printf("Error: Failed to receive fd from listener process\n") + fmt.Println("Error: Failed to receive fd from listener process") _ = unix.Close(forkproxyUDSSockFDNum) return err } @@ -672,7 +672,7 @@ func (c *cmdForkproxy) Run(cmd *cobra.Command, args []string) error { nfds := C.lxc_epoll_wait_nointr(epFd, &events[0], 10, -1) if nfds < 0 { - fmt.Printf("Error: Failed to wait on epoll instance\n") + fmt.Println("Error: Failed to wait on epoll instance") break } @@ -690,7 +690,7 @@ func (c *cmdForkproxy) Run(cmd *cobra.Command, args []string) error { } } - fmt.Printf("Status: Stopping proxy\n") + fmt.Println("Status: Stopping proxy") return nil } From 5b1ccab310646860b442739ddf89ccdc068eb5a5 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 13:44:15 -0400 Subject: [PATCH 013/543] lxd/main/init/interactive: use Print() and Println() when no format specifier is needed Signed-off-by: Simon Deziel --- lxd/main_init_interactive.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd/main_init_interactive.go b/lxd/main_init_interactive.go index 40ac971ab4ac..e269ea9ba651 100644 --- a/lxd/main_init_interactive.go +++ b/lxd/main_init_interactive.go @@ 
-265,8 +265,8 @@ func (c *cmdInit) askClustering(config *api.InitPreseed, d lxd.InstanceServer, s } certDigest := shared.CertFingerprint(cert) - fmt.Printf("Cluster fingerprint: %s\n", certDigest) - fmt.Printf("You can validate this fingerprint by running \"lxc info\" locally on an existing cluster member.\n") + fmt.Println("Cluster fingerprint:", certDigest) + fmt.Println("You can validate this fingerprint by running \"lxc info\" locally on an existing cluster member.") validator := func(input string) error { if input == certDigest { @@ -902,7 +902,7 @@ func (c *cmdInit) askStoragePool(config *api.InitPreseed, d lxd.InstanceServer, if pool.Driver == "lvm" { _, err := exec.LookPath("thin_check") if err != nil { - fmt.Printf(` + fmt.Print(` The LVM thin provisioning tools couldn't be found. LVM can still be used without thin provisioning but this will disable over-provisioning, increase the space requirements and creation time of images, instances and snapshots. @@ -935,7 +935,7 @@ func (c *cmdInit) askDaemon(config *api.InitPreseed, d lxd.InstanceServer, serve // Detect lack of uid/gid idmapset, err := idmap.DefaultIdmapSet("", "") if (err != nil || len(idmapset.Idmap) == 0 || idmapset.Usable() != nil) && shared.RunningInUserNS() { - fmt.Printf(` + fmt.Print(` We detected that you are running inside an unprivileged container. This means that unless you manually configured your host otherwise, you will not have enough uids and gids to allocate to your containers. 
From 6fc845fadce4c69e5a6d635311cf195600254867 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 11:59:01 -0400 Subject: [PATCH 014/543] lxd/main/sql: use Println() when no format specifier is used Signed-off-by: Simon Deziel --- lxd/main_sql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/main_sql.go b/lxd/main_sql.go index 0353cbb3823a..567774b119b8 100644 --- a/lxd/main_sql.go +++ b/lxd/main_sql.go @@ -145,7 +145,7 @@ func (c *cmdSql) Run(cmd *cobra.Command, args []string) error { } if len(batch.Results) > 1 { - fmt.Printf("\n") + fmt.Println("") } } return nil From 9da7acfea6f0ee8ceff00f9ccfac3476deff8ebc Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 16:59:56 -0400 Subject: [PATCH 015/543] lxd/main/recover: use Println() when no format specifier is used Signed-off-by: Simon Deziel --- lxd/main_recover.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lxd/main_recover.go b/lxd/main_recover.go index 9e730f66d70d..ecff77c8edd6 100644 --- a/lxd/main_recover.go +++ b/lxd/main_recover.go @@ -58,7 +58,7 @@ func (c *cmdRecover) Run(cmd *cobra.Command, args []string) error { return fmt.Errorf("Failed getting existing storage pools: %w", err) } - fmt.Print("This LXD server currently has the following storage pools:\n") + fmt.Println("This LXD server currently has the following storage pools:") for _, existingPool := range existingPools { fmt.Printf(" - %s (backend=%q, source=%q)\n", existingPool.Name, existingPool.Driver, existingPool.Config["source"]) } @@ -146,7 +146,7 @@ func (c *cmdRecover) Run(cmd *cobra.Command, args []string) error { } } - fmt.Printf("The recovery process will be scanning the following storage pools:\n") + fmt.Println("The recovery process will be scanning the following storage pools:") for _, p := range existingPools { fmt.Printf(" - EXISTING: %q (backend=%q, source=%q)\n", p.Name, p.Driver, p.Config["source"]) } @@ -164,7 +164,7 @@ func (c 
*cmdRecover) Run(cmd *cobra.Command, args []string) error { return nil } - fmt.Print("Scanning for unknown volumes...\n") + fmt.Println("Scanning for unknown volumes...") // Send /internal/recover/validate request to LXD. reqValidate := internalRecoverValidatePost{ @@ -195,14 +195,14 @@ func (c *cmdRecover) Run(cmd *cobra.Command, args []string) error { } if len(res.UnknownVolumes) > 0 { - fmt.Print("The following unknown volumes have been found:\n") + fmt.Println("The following unknown volumes have been found:") for _, unknownVol := range res.UnknownVolumes { fmt.Printf(" - %s %q on pool %q in project %q (includes %d snapshots)\n", cases.Title(language.English).String(unknownVol.Type), unknownVol.Name, unknownVol.Pool, unknownVol.Project, unknownVol.SnapshotCount) } } if len(res.DependencyErrors) > 0 { - fmt.Print("You are currently missing the following:\n") + fmt.Println("You are currently missing the following:") for _, depErr := range res.DependencyErrors { fmt.Printf(" - %s\n", depErr) @@ -211,7 +211,7 @@ func (c *cmdRecover) Run(cmd *cobra.Command, args []string) error { _, _ = cli.AskString("Please create those missing entries and then hit ENTER: ", "", validate.Optional()) } else { if len(res.UnknownVolumes) <= 0 { - fmt.Print("No unknown volumes found. Nothing to do.\n") + fmt.Println("No unknown volumes found. Nothing to do.") return nil } @@ -228,7 +228,7 @@ func (c *cmdRecover) Run(cmd *cobra.Command, args []string) error { return nil } - fmt.Print("Starting recovery...\n") + fmt.Println("Starting recovery...") // Send /internal/recover/import request to LXD. // Don't lint next line with gosimple. 
It says we should convert reqValidate directly to an internalRecoverImportPost From 1b9d3eb31be642cd920be201900ac6bc080ede15 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 14:30:12 -0400 Subject: [PATCH 016/543] lxd/main/cluster: use Print() when no format specifier is needed Signed-off-by: Simon Deziel --- lxd/main_cluster.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lxd/main_cluster.go b/lxd/main_cluster.go index 1fa520e0609e..41080f24ca61 100644 --- a/lxd/main_cluster.go +++ b/lxd/main_cluster.go @@ -323,7 +323,7 @@ func (c *cmdClusterShow) Run(cmd *cobra.Command, args []string) error { if len(config.Members) > 0 { fmt.Printf(SegmentComment+"\n\n%s", segmentID, data) } else { - fmt.Printf("%s", data) + fmt.Print(data) } return nil @@ -412,7 +412,7 @@ func (c *cmdClusterRecoverFromQuorumLoss) Run(cmd *cobra.Command, args []string) func (c *cmdClusterRecoverFromQuorumLoss) promptConfirmation() error { reader := bufio.NewReader(os.Stdin) - fmt.Printf(`You should run this command only if you are *absolutely* certain that this is + fmt.Print(`You should run this command only if you are *absolutely* certain that this is the only database node left in your cluster AND that other database nodes will never come back (i.e. their LXD daemon won't ever be started again). @@ -488,7 +488,7 @@ func (c *cmdClusterRemoveRaftNode) Run(cmd *cobra.Command, args []string) error func (c *cmdClusterRemoveRaftNode) promptConfirmation() error { reader := bufio.NewReader(os.Stdin) - fmt.Printf(`You should run this command only if you ended up in an + fmt.Print(`You should run this command only if you ended up in an inconsistent state where a node has been uncleanly removed (i.e. it doesn't show up in "lxc cluster list" but it's still in the raft configuration). 
From 6680a038743f9c21f6d48d9e22f6410c0a54e32f Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 11:55:01 -0400 Subject: [PATCH 017/543] lxd-benchmark: use Println() instead of Printf() Signed-off-by: Simon Deziel --- lxd-benchmark/benchmark/benchmark.go | 22 +++++++++++-----------  lxd-benchmark/benchmark/util.go | 18 +++++++++--------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/lxd-benchmark/benchmark/benchmark.go b/lxd-benchmark/benchmark/benchmark.go index 59091b5de592..70a60d88c6b5 100644 --- a/lxd-benchmark/benchmark/benchmark.go +++ b/lxd-benchmark/benchmark/benchmark.go @@ -22,17 +22,17 @@ func PrintServerInfo(c lxd.ContainerServer) error { } env := server.Environment - fmt.Printf("Test environment:\n") - fmt.Printf(" Server backend: %s\n", env.Server) - fmt.Printf(" Server version: %s\n", env.ServerVersion) - fmt.Printf(" Kernel: %s\n", env.Kernel) - fmt.Printf(" Kernel architecture: %s\n", env.KernelArchitecture) - fmt.Printf(" Kernel version: %s\n", env.KernelVersion) - fmt.Printf(" Storage backend: %s\n", env.Storage) - fmt.Printf(" Storage version: %s\n", env.StorageVersion) - fmt.Printf(" Container backend: %s\n", env.Driver) - fmt.Printf(" Container version: %s\n", env.DriverVersion) - fmt.Printf("\n") + fmt.Println("Test environment:") + fmt.Println(" Server backend:", env.Server) + fmt.Println(" Server version:", env.ServerVersion) + fmt.Println(" Kernel:", env.Kernel) + fmt.Println(" Kernel architecture:", env.KernelArchitecture) + fmt.Println(" Kernel version:", env.KernelVersion) + fmt.Println(" Storage backend:", env.Storage) + fmt.Println(" Storage version:", env.StorageVersion) + fmt.Println(" Container backend:", env.Driver) + fmt.Println(" Container version:", env.DriverVersion) + fmt.Println("") return nil } diff --git a/lxd-benchmark/benchmark/util.go b/lxd-benchmark/benchmark/util.go index b387d4f1719f..4047777a63a1 100644 --- a/lxd-benchmark/benchmark/util.go +++ b/lxd-benchmark/benchmark/util.go @@ -27,13 
+27,13 @@ func printTestConfig(count int, batchSize int, image string, privileged bool, fr batches := count / batchSize remainder := count % batchSize - fmt.Printf("Test variables:\n") - fmt.Printf(" Container count: %d\n", count) - fmt.Printf(" Container mode: %s\n", privilegedStr) - fmt.Printf(" Startup mode: %s\n", mode) - fmt.Printf(" Image: %s\n", image) - fmt.Printf(" Batches: %d\n", batches) - fmt.Printf(" Batch size: %d\n", batchSize) - fmt.Printf(" Remainder: %d\n", remainder) - fmt.Printf("\n") + fmt.Println("Test variables:") + fmt.Println(" Container count:", count) + fmt.Println(" Container mode:", privilegedStr) + fmt.Println(" Startup mode:", mode) + fmt.Println(" Image:", image) + fmt.Println(" Batches:", batches) + fmt.Println(" Batch size:", batchSize) + fmt.Println(" Remainder:", remainder) + fmt.Println("") } From 2294dbc84c8cd4764319d1ba96c25f534864f548 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 14:34:09 -0400 Subject: [PATCH 018/543] lxd-migrate: use Print() and Println() when no format specifier is needed Signed-off-by: Simon Deziel --- lxd-migrate/main_migrate.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd-migrate/main_migrate.go b/lxd-migrate/main_migrate.go index b6089d105b6b..010caf4d2865 100644 --- a/lxd-migrate/main_migrate.go +++ b/lxd-migrate/main_migrate.go @@ -134,8 +134,8 @@ func (c *cmdMigrate) askServer() (lxd.InstanceServer, string, error) { digest := shared.CertFingerprint(certificate) - fmt.Printf("Certificate fingerprint: %s\n", digest) - fmt.Printf("ok (y/n)? ") + fmt.Println("Certificate fingerprint:", digest) + fmt.Print("ok (y/n)? 
") line, err := shared.ReadStdin() if err != nil { return nil, "", err @@ -393,14 +393,14 @@ func (c *cmdMigrate) RunInteractive(server lxd.InstanceServer) (cmdMigrateData, } for { - fmt.Printf("\nInstance to be created:\n") + fmt.Println("\nInstance to be created:") scanner := bufio.NewScanner(strings.NewReader(config.Render())) for scanner.Scan() { fmt.Printf(" %s\n", scanner.Text()) } - fmt.Printf(` + fmt.Print(` Additional overrides can be applied at this stage: 1) Begin the migration with the above configuration 2) Override profile list From 73af5d8101c72d444547382b08b8ffcf74d62e96 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 15:01:52 -0400 Subject: [PATCH 019/543] lxc/info: use Print() when no format specifier is needed Signed-off-by: Simon Deziel --- lxc/info.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxc/info.go b/lxc/info.go index 5d9f8c384c68..7cca84663cb6 100644 --- a/lxc/info.go +++ b/lxc/info.go @@ -139,7 +139,7 @@ func (c *cmdInfo) renderGPU(gpu api.ResourcesGPUCard, prefix string, initial boo if len(gpu.SRIOV.VFs) > 0 { fmt.Printf(prefix+" "+i18n.G("VFs: %d")+"\n", gpu.SRIOV.MaximumVFs) for _, vf := range gpu.SRIOV.VFs { - fmt.Printf(prefix + " - ") + fmt.Print(prefix + " - ") c.renderGPU(vf, prefix+" ", false) } } @@ -248,7 +248,7 @@ func (c *cmdInfo) renderNIC(nic api.ResourcesNetworkCard, prefix string, initial if len(nic.SRIOV.VFs) > 0 { fmt.Printf(prefix+" "+i18n.G("VFs: %d")+"\n", nic.SRIOV.MaximumVFs) for _, vf := range nic.SRIOV.VFs { - fmt.Printf(prefix + " - ") + fmt.Print(prefix + " - ") c.renderNIC(vf, prefix+" ", false) } } From 815c6e1e70d379e6685ded6d16d0783c68b79856 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 15:03:31 -0400 Subject: [PATCH 020/543] lxc/file: use Println() when no format specifier is used Signed-off-by: Simon Deziel --- lxc/file.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxc/file.go b/lxc/file.go index 
413344625325..3d8ec8e224d0 100644 --- a/lxc/file.go +++ b/lxc/file.go @@ -1196,7 +1196,7 @@ func (c *cmdFileMount) sshSFTPServer(ctx context.Context, instName string, resou if config.PasswordCallback != nil { fmt.Printf("Login with username %q and password %q\n", authUser, authPass) } else { - fmt.Printf("Login without username and password\n") + fmt.Println("Login without username and password") } for { From 0b2f4a5857f32d76e03cee74272cdd05f91f8fe8 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Sun, 11 Jun 2023 11:36:31 -0400 Subject: [PATCH 021/543] shared/cmd: use Print() when no format specifier is used Signed-off-by: Simon Deziel --- shared/cmd/ask.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/cmd/ask.go b/shared/cmd/ask.go index bb3fc0ba3cdc..73bbb06e8a2a 100644 --- a/shared/cmd/ask.go +++ b/shared/cmd/ask.go @@ -116,7 +116,7 @@ func AskPassword(question string) string { inFirst := string(pwd) inFirst = strings.TrimSuffix(inFirst, "\n") - fmt.Printf("Again: ") + fmt.Print("Again: ") pwd, _ = term.ReadPassword(0) fmt.Println("") inSecond := string(pwd) From 19ae0d1557be7269406214c58cd36a56cc72d334 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:26:38 +0100 Subject: [PATCH 022/543] lxd/instance/drivers/driver/lxc: Update initLXC to return a pointer to liblxc.Container Also add a mutex for accessing d.c var. Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_lxc.go | 161 +++++++++++++++-------------- 1 file changed, 85 insertions(+), 76 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index 64136f620951..a62bd45416c7 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -363,6 +363,9 @@ func lxcUnload(d *lxc) { // release releases any internal reference to a liblxc container, invalidating the go-lxc cache. 
func (d *lxc) release() { + d.cMu.Lock() + defer d.cMu.Unlock() + if d.c != nil { _ = d.c.Release() d.c = nil @@ -423,10 +426,13 @@ type lxc struct { // Config handling. fromHook bool + cMu sync.Mutex + // Cached handles. // Do not use these variables directly, instead use their associated get functions so they // will be initialised on demand. - c *liblxc.Container + c *liblxc.Container // Use d.initLXC() instead of accessing this directly. + cConfig bool idmapset *idmap.IdmapSet } @@ -626,20 +632,23 @@ func (d *lxc) init() error { return nil } -func (d *lxc) initLXC(config bool) error { +func (d *lxc) initLXC(config bool) (*liblxc.Container, error) { + d.cMu.Lock() + defer d.cMu.Unlock() + // No need to go through all that for snapshots if d.IsSnapshot() { - return nil + return nil, nil } // Check if being called from a hook if d.fromHook { - return fmt.Errorf("You can't use go-lxc from inside a LXC hook") + return nil, fmt.Errorf("You can't use go-lxc from inside a LXC hook") } // Check if already initialized if d.c != nil && (!config || d.cConfig) { - return nil + return d.c, nil } revert := revert.New() @@ -649,7 +658,7 @@ func (d *lxc) initLXC(config bool) error { cname := project.Instance(d.Project().Name, d.Name()) cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath) if err != nil { - return err + return nil, err } revert.Add(func() { @@ -659,14 +668,14 @@ func (d *lxc) initLXC(config bool) error { // Load cgroup abstraction cg, err := d.cgroup(cc) if err != nil { - return err + return nil, err } // Setup logging logfile := d.LogFilePath() err = lxcSetConfigItem(cc, "lxc.log.file", logfile) if err != nil { - return err + return nil, err } logLevel := "warn" @@ -678,19 +687,19 @@ func (d *lxc) initLXC(config bool) error { err = lxcSetConfigItem(cc, "lxc.log.level", logLevel) if err != nil { - return err + return nil, err } if liblxc.RuntimeLiblxcVersionAtLeast(liblxc.Version(), 3, 0, 0) { // Default size log buffer err = lxcSetConfigItem(cc, 
"lxc.console.buffer.size", "auto") if err != nil { - return err + return nil, err } err = lxcSetConfigItem(cc, "lxc.console.size", "auto") if err != nil { - return err + return nil, err } // File to dump ringbuffer contents to when requested or @@ -698,19 +707,19 @@ func (d *lxc) initLXC(config bool) error { consoleBufferLogFile := d.ConsoleBufferLogPath() err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile) if err != nil { - return err + return nil, err } } if d.state.OS.ContainerCoreScheduling { err = lxcSetConfigItem(cc, "lxc.sched.core", "1") if err != nil { - return err + return nil, err } } else if d.state.OS.CoreScheduling { err = lxcSetConfigItem(cc, "lxc.hook.start-host", fmt.Sprintf("/proc/%d/exe forkcoresched 1", os.Getpid())) if err != nil { - return err + return nil, err } } @@ -724,7 +733,7 @@ func (d *lxc) initLXC(config bool) error { d.c = cc revert.Success() - return nil + return cc, err } if d.IsPrivileged() { @@ -736,7 +745,7 @@ func (d *lxc) initLXC(config bool) error { err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop) if err != nil { - return err + return nil, err } } @@ -763,17 +772,17 @@ func (d *lxc) initLXC(config bool) error { err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " ")) if err != nil { - return err + return nil, err } err = lxcSetConfigItem(cc, "lxc.autodev", "1") if err != nil { - return err + return nil, err } err = lxcSetConfigItem(cc, "lxc.pty.max", "1024") if err != nil { - return err + return nil, err } bindMounts := []string{ @@ -792,7 +801,7 @@ func (d *lxc) initLXC(config bool) error { if d.IsPrivileged() && !d.state.OS.RunningInUserNS { err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0") if err != nil { - return err + return nil, err } } else { bindMounts = append(bindMounts, "/dev/mqueue") @@ -806,12 +815,12 @@ func (d *lxc) initLXC(config bool) error { if shared.IsDir(mnt) { err = lxcSetConfigItem(cc, "lxc.mount.entry", 
fmt.Sprintf("%s %s none rbind,create=dir,optional 0 0", mnt, strings.TrimPrefix(mnt, "/"))) if err != nil { - return err + return nil, err } } else { err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional 0 0", mnt, strings.TrimPrefix(mnt, "/"))) if err != nil { - return err + return nil, err } } } @@ -825,7 +834,7 @@ func (d *lxc) initLXC(config bool) error { if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) { err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir)) if err != nil { - return err + return nil, err } } @@ -838,7 +847,7 @@ func (d *lxc) initLXC(config bool) error { } if err != nil { - return err + return nil, err } devices := []string{ @@ -865,7 +874,7 @@ func (d *lxc) initLXC(config bool) error { } if err != nil { - return err + return nil, err } } } @@ -877,12 +886,12 @@ func (d *lxc) initLXC(config bool) error { */ err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional 0 0") if err != nil { - return err + return nil, err } err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional 0 0") if err != nil { - return err + return nil, err } } @@ -891,56 +900,56 @@ func (d *lxc) initLXC(config bool) error { if err != nil { personality, err = osarch.ArchitecturePersonality(d.state.OS.Architectures[0]) if err != nil { - return err + return nil, err } } err = lxcSetConfigItem(cc, "lxc.arch", personality) if err != nil { - return err + return nil, err } // Setup the hooks err = lxcSetConfigItem(cc, "lxc.hook.version", "1") if err != nil { - return err + return nil, err } // Call the onstart hook on start. 
err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %s %s start", os.Getpid(), shared.VarPath(""), strconv.Quote(d.Project().Name), strconv.Quote(d.Name()))) if err != nil { - return err + return nil, err } // Call the onstopns hook on stop but before namespaces are unmounted. err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %s %s stopns", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project().Name), strconv.Quote(d.Name()))) if err != nil { - return err + return nil, err } // Call the onstop hook on stop. err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %s %s stop", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project().Name), strconv.Quote(d.Name()))) if err != nil { - return err + return nil, err } // Setup the console err = lxcSetConfigItem(cc, "lxc.tty.max", "0") if err != nil { - return err + return nil, err } // Setup the hostname err = lxcSetConfigItem(cc, "lxc.uts.name", d.Name()) if err != nil { - return err + return nil, err } // Setup devlxd if shared.IsTrueOrEmpty(d.expandedConfig["security.devlxd"]) { err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd"))) if err != nil { - return err + return nil, err } } @@ -952,7 +961,7 @@ func (d *lxc) initLXC(config bool) error { curProfile = strings.TrimSuffix(curProfile, " (enforce)") err := lxcSetConfigItem(cc, "lxc.apparmor.profile", curProfile) if err != nil { - return err + return nil, err } } else { // If not currently confined, use the container's profile @@ -971,7 +980,7 @@ func (d *lxc) initLXC(config bool) error { err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile) if err != nil { - return err + return nil, err } } } else { @@ -984,7 +993,7 @@ func (d *lxc) initLXC(config bool) error { if seccomp.InstanceNeedsPolicy(d) { err = lxcSetConfigItem(cc, "lxc.seccomp.profile", seccomp.ProfilePath(d)) if err != nil { - return 
err + return nil, err } // Setup notification socket @@ -993,7 +1002,7 @@ func (d *lxc) initLXC(config bool) error { if err == nil && ok { err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket"))) if err != nil { - return err + return nil, err } } } @@ -1001,7 +1010,7 @@ func (d *lxc) initLXC(config bool) error { // Setup idmap idmapset, err := d.NextIdmap() if err != nil { - return err + return nil, err } if idmapset != nil { @@ -1009,7 +1018,7 @@ func (d *lxc) initLXC(config bool) error { for _, line := range lines { err := lxcSetConfigItem(cc, "lxc.idmap", line) if err != nil { - return err + return nil, err } } } @@ -1019,7 +1028,7 @@ func (d *lxc) initLXC(config bool) error { if strings.HasPrefix(k, "environment.") { err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v)) if err != nil { - return err + return nil, err } } } @@ -1033,29 +1042,29 @@ func (d *lxc) initLXC(config bool) error { hookPath := filepath.Join(hookDir, "nvidia") if !shared.PathExists(hookPath) { - return fmt.Errorf("The NVIDIA LXC hook couldn't be found") + return nil, fmt.Errorf("The NVIDIA LXC hook couldn't be found") } _, err := exec.LookPath("nvidia-container-cli") if err != nil { - return fmt.Errorf("The NVIDIA container tools couldn't be found") + return nil, fmt.Errorf("The NVIDIA container tools couldn't be found") } err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_VISIBLE_DEVICES=none") if err != nil { - return err + return nil, err } nvidiaDriver := d.expandedConfig["nvidia.driver.capabilities"] if nvidiaDriver == "" { err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility") if err != nil { - return err + return nil, err } } else { err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_DRIVER_CAPABILITIES=%s", nvidiaDriver)) if err != nil { - return err + return nil, err } } @@ -1063,7 +1072,7 @@ func (d *lxc) 
initLXC(config bool) error { if nvidiaRequireCuda == "" { err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda)) if err != nil { - return err + return nil, err } } @@ -1071,13 +1080,13 @@ func (d *lxc) initLXC(config bool) error { if nvidiaRequireDriver == "" { err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver)) if err != nil { - return err + return nil, err } } err = lxcSetConfigItem(cc, "lxc.hook.mount", hookPath) if err != nil { - return err + return nil, err } } @@ -1094,49 +1103,49 @@ func (d *lxc) initLXC(config bool) error { if strings.HasSuffix(memory, "%") { percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64) if err != nil { - return err + return nil, err } memoryTotal, err := shared.DeviceTotalMemory() if err != nil { - return err + return nil, err } valueInt = int64((memoryTotal / 100) * percent) } else { valueInt, err = units.ParseByteSizeString(memory) if err != nil { - return err + return nil, err } } if memoryEnforce == "soft" { err = cg.SetMemorySoftLimit(valueInt) if err != nil { - return err + return nil, err } } else { if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && shared.IsTrueOrEmpty(memorySwap) { err = cg.SetMemoryLimit(valueInt) if err != nil { - return err + return nil, err } err = cg.SetMemorySwapLimit(0) if err != nil { - return err + return nil, err } } else { err = cg.SetMemoryLimit(valueInt) if err != nil { - return err + return nil, err } } // Set soft limit to value 10% less than hard limit err = cg.SetMemorySoftLimit(int64(float64(valueInt) * 0.9)) if err != nil { - return err + return nil, err } } } @@ -1146,18 +1155,18 @@ func (d *lxc) initLXC(config bool) error { if shared.IsFalse(memorySwap) { err = cg.SetMemorySwappiness(0) if err != nil { - return err + return nil, err } } else if memorySwapPriority != "" { priority, err := strconv.Atoi(memorySwapPriority) if err != nil { - return err + return 
nil, err } // Maximum priority (10) should be default swappiness (60). err = cg.SetMemorySwappiness(int64(70 - priority)) if err != nil { - return err + return nil, err } } } @@ -1170,20 +1179,20 @@ func (d *lxc) initLXC(config bool) error { if (cpuPriority != "" || cpuAllowance != "") && d.state.OS.CGInfo.Supports(cgroup.CPU, cg) { cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(cpuAllowance, cpuPriority) if err != nil { - return err + return nil, err } if cpuShares != 1024 { err = cg.SetCPUShare(cpuShares) if err != nil { - return err + return nil, err } } if cpuCfsPeriod != -1 && cpuCfsQuota != -1 { err = cg.SetCPUCfsLimit(cpuCfsPeriod, cpuCfsQuota) if err != nil { - return err + return nil, err } } } @@ -1194,7 +1203,7 @@ func (d *lxc) initLXC(config bool) error { if d.state.OS.CGInfo.Supports(cgroup.BlkioWeight, nil) { priorityInt, err := strconv.Atoi(diskPriority) if err != nil { - return err + return nil, err } priority := priorityInt * 100 @@ -1206,10 +1215,10 @@ func (d *lxc) initLXC(config bool) error { err = cg.SetBlkioWeight(int64(priority)) if err != nil { - return err + return nil, err } } else { - return fmt.Errorf("Cannot apply limits.disk.priority as blkio.weight cgroup controller is missing") + return nil, fmt.Errorf("Cannot apply limits.disk.priority as blkio.weight cgroup controller is missing") } } @@ -1219,12 +1228,12 @@ func (d *lxc) initLXC(config bool) error { if processes != "" { valueInt, err := strconv.ParseInt(processes, 10, 64) if err != nil { - return err + return nil, err } err = cg.SetMaxProcesses(valueInt) if err != nil { - return err + return nil, err } } } @@ -1236,12 +1245,12 @@ func (d *lxc) initLXC(config bool) error { if value != "" { value, err := units.ParseByteSizeString(value) if err != nil { - return err + return nil, err } err = cg.SetHugepagesLimit(shared.HugePageSizeSuffix[i], value) if err != nil { - return err + return nil, err } } } @@ -1254,7 +1263,7 @@ func (d *lxc) initLXC(config bool) error { 
prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix) err = lxcSetConfigItem(cc, prlimitKey, v) if err != nil { - return err + return nil, err } } } @@ -1266,7 +1275,7 @@ func (d *lxc) initLXC(config bool) error { sysctlKey := fmt.Sprintf("lxc.sysctl.%s", sysctlSuffix) err = lxcSetConfigItem(cc, sysctlKey, v) if err != nil { - return err + return nil, err } } } @@ -1279,7 +1288,7 @@ func (d *lxc) initLXC(config bool) error { } if err != nil { - return err + return nil, err } if d.c != nil { @@ -1288,7 +1297,7 @@ func (d *lxc) initLXC(config bool) error { d.c = cc revert.Success() - return nil + return cc, err } var idmappedStorageMap map[unix.Fsid]idmap.IdmapStorageType = map[unix.Fsid]idmap.IdmapStorageType{} From 6d6f7d0598356e4215ba534a6f55d15889e91431 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:29:34 +0100 Subject: [PATCH 023/543] lxd/instance/drivers/driver/lxc: Update cgroup to require being passed a liblxc.Container Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_lxc.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index a62bd45416c7..09d49deaa1c4 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -8072,8 +8072,6 @@ func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) { if cc != nil { rw.cc = cc rw.conf = true - } else { - rw.cc = d.c } if rw.cc == nil { From 7138b82eb69f60f99c7e551a849f89cdaa6154d7 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:31:17 +0100 Subject: [PATCH 024/543] lxd/instance/drivers/driver/lxc: Updates loadRawLXCConfig to accept a liblxc.Container Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_lxc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index 09d49deaa1c4..486465346c86 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ 
b/lxd/instance/drivers/driver_lxc.go @@ -8425,7 +8425,7 @@ func (d *lxc) getFSStats() (*metrics.MetricSet, error) { return out, nil } -func (d *lxc) loadRawLXCConfig() error { +func (d *lxc) loadRawLXCConfig(cc *liblxc.Container) error { // Load the LXC raw config. lxcConfig, ok := d.expandedConfig["raw.lxc"] if !ok { @@ -8449,7 +8449,7 @@ func (d *lxc) loadRawLXCConfig() error { } // Load the config. - err = d.c.LoadConfigFile(f.Name()) + err = cc.LoadConfigFile(f.Name()) if err != nil { return fmt.Errorf("Failed to load config file %q: %w", f.Name(), err) } From 12283d01a96c126c8a71e8baf2bd6addda1ce502 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:31:57 +0100 Subject: [PATCH 025/543] lxd/instance/drivers/driver/lxc: Update to use local liblxc.Container returned from d.initLXC This helps to reason about access to the d.c variable and controls its initialisation via a mutex. Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_lxc.go | 207 +++++++++++++++++------------ 1 file changed, 121 insertions(+), 86 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index 486465346c86..a737e32cd3ac 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -1479,7 +1479,12 @@ func (d *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) erro // deviceAddCgroupRules live adds cgroup rules to a container. func (d *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error { - cg, err := d.cgroup(nil) + cc, err := d.initLXC(false) + if err != nil { + return err + } + + cg, err := d.cgroup(cc) if err != nil { return err } @@ -1515,13 +1520,13 @@ func (d *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig } // Load the go-lxc struct. - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return err } // Add the interface to the container. 
- err = d.c.AttachInterface(devName, configCopy["name"]) + err = cc.AttachInterface(devName, configCopy["name"]) if err != nil { return fmt.Errorf("Failed to attach interface: %s to %s: %w", devName, configCopy["name"], err) } @@ -1875,7 +1880,7 @@ func (d *lxc) startCommon() (string, []func() error, error) { defer revert.Fail() // Load the go-lxc struct - err := d.initLXC(true) + cc, err := d.initLXC(true) if err != nil { return "", nil, fmt.Errorf("Load go-lxc struct: %w", err) } @@ -2049,11 +2054,11 @@ func (d *lxc) startCommon() (string, []func() error, error) { if runConf.RootFS.Path != "" { if !liblxc.RuntimeLiblxcVersionAtLeast(liblxc.Version(), 2, 1, 0) { // Set the rootfs backend type if supported (must happen before any other lxc.rootfs) - err := lxcSetConfigItem(d.c, "lxc.rootfs.backend", "dir") + err := lxcSetConfigItem(cc, "lxc.rootfs.backend", "dir") if err == nil { - value := d.c.ConfigItem("lxc.rootfs.backend") + value := cc.ConfigItem("lxc.rootfs.backend") if len(value) == 0 || value[0] != "dir" { - _ = lxcSetConfigItem(d.c, "lxc.rootfs.backend", "") + _ = lxcSetConfigItem(cc, "lxc.rootfs.backend", "") } } } @@ -2066,9 +2071,9 @@ func (d *lxc) startCommon() (string, []func() error, error) { if liblxc.RuntimeLiblxcVersionAtLeast(liblxc.Version(), 2, 1, 0) { rootfsPath := fmt.Sprintf("dir:%s", absoluteRootfs) - err = lxcSetConfigItem(d.c, "lxc.rootfs.path", rootfsPath) + err = lxcSetConfigItem(cc, "lxc.rootfs.path", rootfsPath) } else { - err = lxcSetConfigItem(d.c, "lxc.rootfs", absoluteRootfs) + err = lxcSetConfigItem(cc, "lxc.rootfs", absoluteRootfs) } if err != nil { @@ -2076,7 +2081,7 @@ func (d *lxc) startCommon() (string, []func() error, error) { } if len(runConf.RootFS.Opts) > 0 { - err = lxcSetConfigItem(d.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ",")) + err = lxcSetConfigItem(cc, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ",")) if err != nil { return "", nil, fmt.Errorf("Failed to setup device rootfs %q: %w", 
dev.Name(), err) } @@ -2084,25 +2089,25 @@ func (d *lxc) startCommon() (string, []func() error, error) { if !d.IsPrivileged() { if idmapType == idmap.IdmapStorageIdmapped { - err = lxcSetConfigItem(d.c, "lxc.rootfs.options", "idmap=container") + err = lxcSetConfigItem(cc, "lxc.rootfs.options", "idmap=container") if err != nil { return "", nil, fmt.Errorf("Failed to set \"idmap=container\" rootfs option: %w", err) } } else if idmapType == idmap.IdmapStorageShiftfs { // Host side mark mount. - err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath()))) + err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath()))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } // Container side shift mount. - err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath()))) + err = lxcSetConfigItem(cc, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath()))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } // Host side umount of mark mount. 
- err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(d.RootfsPath()))) + err = lxcSetConfigItem(cc, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(d.RootfsPath()))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } @@ -2118,9 +2123,9 @@ func (d *lxc) startCommon() (string, []func() error, error) { } if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified { - err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup2.%s", rule.Key), rule.Value) + err = lxcSetConfigItem(cc, fmt.Sprintf("lxc.cgroup2.%s", rule.Key), rule.Value) } else { - err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value) + err = lxcSetConfigItem(cc, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value) } if err != nil { @@ -2143,17 +2148,17 @@ func (d *lxc) startCommon() (string, []func() error, error) { case idmap.IdmapStorageIdmapped: mntOptions = strings.Join([]string{mntOptions, "idmap=container"}, ",") case idmap.IdmapStorageShiftfs: - err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath))) + err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } - err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath))) + err = lxcSetConfigItem(cc, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } - err = 
lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath))) + err = lxcSetConfigItem(cc, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath))) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount shiftfs %q: %w", dev.Name(), err) } @@ -2164,7 +2169,7 @@ func (d *lxc) startCommon() (string, []func() error, error) { } mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, mntOptions, mount.Freq, mount.PassNo) - err = lxcSetConfigItem(d.c, "lxc.mount.entry", mntVal) + err = lxcSetConfigItem(cc, "lxc.mount.entry", mntVal) if err != nil { return "", nil, fmt.Errorf("Failed to setup device mount %q: %w", dev.Name(), err) } @@ -2182,7 +2187,7 @@ func (d *lxc) startCommon() (string, []func() error, error) { } for _, nicItem := range runConf.NetworkInterface { - err = lxcSetConfigItem(d.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value) + err = lxcSetConfigItem(cc, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value) if err != nil { return "", nil, fmt.Errorf("Failed to setup device network interface %q: %w", dev.Name(), err) } @@ -2206,21 +2211,21 @@ func (d *lxc) startCommon() (string, []func() error, error) { // Override NVIDIA_VISIBLE_DEVICES if we have devices that need it. if len(nvidiaDevices) > 0 { - err = lxcSetConfigItem(d.c, "lxc.environment", fmt.Sprintf("NVIDIA_VISIBLE_DEVICES=%s", strings.Join(nvidiaDevices, ","))) + err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_VISIBLE_DEVICES=%s", strings.Join(nvidiaDevices, ","))) if err != nil { return "", nil, fmt.Errorf("Unable to set NVIDIA_VISIBLE_DEVICES in LXC environment: %w", err) } } // Load the LXC raw config. 
- err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { return "", nil, err } // Generate the LXC config configPath := filepath.Join(d.LogPath(), "lxc.conf") - err = d.c.SaveConfigFile(configPath) + err = cc.SaveConfigFile(configPath) if err != nil { _ = os.Remove(configPath) return "", nil, err @@ -2592,20 +2597,21 @@ func (d *lxc) Stop(stateful bool) error { }() // Load the go-lxc struct + var cc *liblxc.Container if d.expandedConfig["raw.lxc"] != "" { - err = d.initLXC(true) + cc, err = d.initLXC(true) if err != nil { op.Done(err) return err } - err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { op.Done(err) return err } } else { - err = d.initLXC(false) + cc, err = d.initLXC(false) if err != nil { op.Done(err) return err @@ -2661,7 +2667,7 @@ func (d *lxc) Stop(stateful bool) error { } // Load cgroup abstraction - cg, err := d.cgroup(nil) + cg, err := d.cgroup(cc) if err != nil { op.Done(err) return err @@ -2686,7 +2692,7 @@ func (d *lxc) Stop(stateful bool) error { } } - err = d.c.Stop() + err = cc.Stop() if err != nil { op.Done(err) return err @@ -2769,20 +2775,21 @@ func (d *lxc) Shutdown(timeout time.Duration) error { }() // Load the go-lxc struct + var cc *liblxc.Container if d.expandedConfig["raw.lxc"] != "" { - err = d.initLXC(true) + cc, err = d.initLXC(true) if err != nil { op.Done(err) return err } - err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { op.Done(err) return err } } else { - err = d.initLXC(false) + cc, err = d.initLXC(false) if err != nil { op.Done(err) return err @@ -2791,7 +2798,7 @@ func (d *lxc) Shutdown(timeout time.Duration) error { // Request shutdown, but don't wait for container to stop. If call fails then cancel operation with error, // otherwise expect the onStop() hook to cancel operation when done (when the container has stopped). 
- err = d.c.Shutdown(0) + err = cc.Shutdown(0) if err != nil { op.Done(err) } @@ -3033,7 +3040,15 @@ func (d *lxc) Freeze() error { return fmt.Errorf("The instance isn't running") } - cg, err := d.cgroup(nil) + // Load the go-lxc struct + cc, err := d.initLXC(false) + if err != nil { + ctxMap["err"] = err + d.logger.Error("Failed freezing container", ctxMap) + return err + } + + cg, err := d.cgroup(cc) if err != nil { return err } @@ -3051,15 +3066,7 @@ func (d *lxc) Freeze() error { d.logger.Info("Freezing container", ctxMap) - // Load the go-lxc struct - err = d.initLXC(false) - if err != nil { - ctxMap["err"] = err - d.logger.Error("Failed freezing container", ctxMap) - return err - } - - err = d.c.Freeze() + err = cc.Freeze() if err != nil { ctxMap["err"] = err d.logger.Error("Failed freezing container", ctxMap) @@ -3084,7 +3091,14 @@ func (d *lxc) Unfreeze() error { return fmt.Errorf("The container isn't running") } - cg, err := d.cgroup(nil) + // Load the go-lxc struct + cc, err := d.initLXC(false) + if err != nil { + d.logger.Error("Failed unfreezing container", ctxMap) + return err + } + + cg, err := d.cgroup(cc) if err != nil { return err } @@ -3102,14 +3116,7 @@ func (d *lxc) Unfreeze() error { d.logger.Info("Unfreezing container", ctxMap) - // Load the go-lxc struct - err = d.initLXC(false) - if err != nil { - d.logger.Error("Failed unfreezing container", ctxMap) - return err - } - - err = d.c.Unfreeze() + err = cc.Unfreeze() if err != nil { d.logger.Error("Failed unfreezing container", ctxMap) } @@ -3128,20 +3135,16 @@ func (d *lxc) getLxcState() (liblxc.State, error) { } // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return liblxc.StateMap["STOPPED"], err } - if d.c == nil { - return liblxc.StateMap["STOPPED"], nil - } - monitor := make(chan liblxc.State, 1) go func(c *liblxc.Container) { monitor <- c.State() - }(d.c) + }(cc) select { case state := <-monitor: @@ -3348,17 +3351,17 @@ func (d *lxc) 
snapshot(name string, expiry time.Time, stateful bool) error { // Load the go-lxc struct if d.expandedConfig["raw.lxc"] != "" { - err = d.initLXC(true) + cc, err := d.initLXC(true) if err != nil { return err } - err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { return err } } else { - err = d.initLXC(false) + _, err = d.initLXC(false) if err != nil { return err } @@ -3944,7 +3947,7 @@ func (d *lxc) Rename(newName string, applyTemplateTrigger bool) error { // CGroupSet sets a cgroup value for the instance. func (d *lxc) CGroupSet(key string, value string) error { // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return err } @@ -3954,7 +3957,7 @@ func (d *lxc) CGroupSet(key string, value string) error { return fmt.Errorf("Can't set cgroups on a stopped container") } - err = d.c.SetCgroupItem(key, value) + err = cc.SetCgroupItem(key, value) if err != nil { return fmt.Errorf("Failed to set cgroup %s=\"%s\": %w", key, value, err) } @@ -4101,7 +4104,7 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { d.expiryDate = oldExpiryDate d.release() d.cConfig = false - _ = d.initLXC(true) + _, _ = d.initLXC(true) cgroup.TaskSchedulerTrigger("container", d.name, "changed") } }() @@ -4202,7 +4205,7 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { if userRequested { d.release() d.cConfig = false - err = d.initLXC(true) + _, err = d.initLXC(true) if err != nil { return fmt.Errorf("Initialize LXC: %w", err) } @@ -4216,7 +4219,7 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { return err } - err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { // Release the liblxc instance. 
_ = cc.Release() @@ -4298,7 +4301,12 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { // Apply the live changes if isRunning { - cg, err := d.cgroup(nil) + cc, err := d.initLXC(false) + if err != nil { + return err + } + + cg, err := d.cgroup(cc) if err != nil { return err } @@ -6333,18 +6341,19 @@ func (d *lxc) migrate(args *instance.CriuMigrationArgs) error { } } else { // Load the go-lxc struct + var cc *liblxc.Container if d.expandedConfig["raw.lxc"] != "" { - err = d.initLXC(true) + cc, err = d.initLXC(true) if err != nil { return err } - err = d.loadRawLXCConfig() + err = d.loadRawLXCConfig(cc) if err != nil { return err } } else { - err = d.initLXC(false) + cc, err = d.initLXC(false) if err != nil { return err } @@ -6385,7 +6394,7 @@ func (d *lxc) migrate(args *instance.CriuMigrationArgs) error { args.Stop = false } - migrateErr = d.c.Migrate(args.Cmd, opts) + migrateErr = cc.Migrate(args.Cmd, opts) } collectErr := collectCRIULogFile(d, finalStateDir, args.Function, prettyCmd) @@ -6911,7 +6920,12 @@ func (d *lxc) Console(protocol string) (*os.File, chan error, error) { // ConsoleLog returns console log. 
func (d *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) { - msg, err := d.c.ConsoleLog(opts) + cc, err := d.initLXC(false) + if err != nil { + return "", err + } + + msg, err := cc.ConsoleLog(opts) if err != nil { return "", err } @@ -7031,8 +7045,13 @@ func (d *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st func (d *lxc) cpuState() api.InstanceStateCPU { cpu := api.InstanceStateCPU{} + cc, err := d.initLXC(false) + if err != nil { + return cpu + } + // CPU usage in seconds - cg, err := d.cgroup(nil) + cg, err := d.cgroup(cc) if err != nil { return cpu } @@ -7104,7 +7123,13 @@ func (d *lxc) diskState() map[string]api.InstanceStateDisk { func (d *lxc) memoryState() api.InstanceStateMemory { memory := api.InstanceStateMemory{} - cg, err := d.cgroup(nil) + + cc, err := d.initLXC(false) + if err != nil { + return memory + } + + cg, err := d.cgroup(cc) if err != nil { return memory } @@ -7223,7 +7248,12 @@ func (d *lxc) processesState(pid int) (int64, error) { return 0, fmt.Errorf("PID of LXC instance could not be initialized") } - cg, err := d.cgroup(nil) + cc, err := d.initLXC(false) + if err != nil { + return -1, err + } + + cg, err := d.cgroup(cc) if err != nil { return 0, err } @@ -7839,13 +7869,13 @@ func (d *lxc) removeDiskDevices() error { // Network I/O limits. func (d *lxc) setNetworkPriority() error { // Load the go-lxc struct. - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return err } // Load the cgroup struct. - cg, err := d.cgroup(nil) + cg, err := d.cgroup(cc) if err != nil { return err } @@ -7955,29 +7985,29 @@ func (d *lxc) LockExclusive() (*operationlock.InstanceOperation, error) { // InitPID returns PID of init process. func (d *lxc) InitPID() int { // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return -1 } - return d.c.InitPid() + return cc.InitPid() } // InitPidFd returns pidfd of init process. 
func (d *lxc) InitPidFd() (*os.File, error) { // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return nil, err } - return d.c.InitPidFd() + return cc.InitPidFd() } // DevptsFd returns dirfd of devpts mount. func (d *lxc) DevptsFd() (*os.File, error) { // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return nil, err } @@ -7988,7 +8018,7 @@ func (d *lxc) DevptsFd() (*os.File, error) { return nil, fmt.Errorf("Missing devpts_fd extension") } - return d.c.DevptsFd() + return cc.DevptsFd() } // CurrentIdmap returns current IDMAP. @@ -8059,12 +8089,12 @@ func (d *lxc) LogFilePath() string { func (d *lxc) CGroup() (*cgroup.CGroup, error) { // Load the go-lxc struct - err := d.initLXC(false) + cc, err := d.initLXC(false) if err != nil { return nil, err } - return d.cgroup(nil) + return d.cgroup(cc) } func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) { @@ -8145,8 +8175,13 @@ func (d *lxc) Metrics(hostInterfaces []net.Interface) (*metrics.MetricSet, error return nil, ErrInstanceIsStopped } + cc, err := d.initLXC(false) + if err != nil { + return nil, err + } + // Load cgroup abstraction - cg, err := d.cgroup(nil) + cg, err := d.cgroup(cc) if err != nil { return nil, err } From c95b2e4d1c4aa6f9aa2a1b5dbf33531a6aecff6d Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:33:08 +0100 Subject: [PATCH 026/543] lxd/instance: Don't return instance.Instance from instanceCreateFromImage It was not used. Signed-off-by: Thomas Parrott --- lxd/instance.go | 18 +++++++++--------- lxd/instances_post.go | 3 +-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/lxd/instance.go b/lxd/instance.go index b13ab1739339..ebdd97f29fcf 100644 --- a/lxd/instance.go +++ b/lxd/instance.go @@ -115,18 +115,18 @@ func ensureImageIsLocallyAvailable(s *state.State, r *http.Request, img *api.Ima } // instanceCreateFromImage creates an instance from a rootfs image. 
-func instanceCreateFromImage(s *state.State, r *http.Request, img *api.Image, args db.InstanceArgs, op *operations.Operation) (instance.Instance, error) { +func instanceCreateFromImage(s *state.State, r *http.Request, img *api.Image, args db.InstanceArgs, op *operations.Operation) error { revert := revert.New() defer revert.Fail() // Validate the type of the image matches the type of the instance. imgType, err := instancetype.New(img.Type) if err != nil { - return nil, err + return err } if imgType != args.Type { - return nil, fmt.Errorf("Requested image's type %q doesn't match instance type %q", imgType, args.Type) + return fmt.Errorf("Requested image's type %q doesn't match instance type %q", imgType, args.Type) } // Set the "image.*" keys. @@ -142,7 +142,7 @@ func instanceCreateFromImage(s *state.State, r *http.Request, img *api.Image, ar // Create the instance. inst, instOp, cleanup, err := instance.CreateInternal(s, args, true) if err != nil { - return nil, fmt.Errorf("Failed creating instance record: %w", err) + return fmt.Errorf("Failed creating instance record: %w", err) } revert.Add(cleanup) @@ -157,28 +157,28 @@ func instanceCreateFromImage(s *state.State, r *http.Request, img *api.Image, ar return nil }) if err != nil { - return nil, err + return err } pool, err := storagePools.LoadByInstance(s, inst) if err != nil { - return nil, fmt.Errorf("Failed loading instance storage pool: %w", err) + return fmt.Errorf("Failed loading instance storage pool: %w", err) } err = pool.CreateInstanceFromImage(inst, img.Fingerprint, op) if err != nil { - return nil, fmt.Errorf("Failed creating instance from image: %w", err) + return fmt.Errorf("Failed creating instance from image: %w", err) } revert.Add(func() { _ = inst.Delete(true) }) err = inst.UpdateBackupFile() if err != nil { - return nil, err + return err } revert.Success() - return inst, nil + return nil } // instanceCreateAsCopyOpts options for copying an instance. 
diff --git a/lxd/instances_post.go b/lxd/instances_post.go index ff49d8078acd..8a46e1b68325 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -115,8 +115,7 @@ func createFromImage(s *state.State, r *http.Request, p api.Project, profiles [] return err } - _, err = instanceCreateFromImage(s, r, img, args, op) - return err + return instanceCreateFromImage(s, r, img, args, op) } resources := map[string][]string{} From b6c07f05b600e8484f09be3e0e212a5847a9248f Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 13:39:43 +0100 Subject: [PATCH 027/543] test: Improve liblxc file handle leak detection Signed-off-by: Thomas Parrott --- test/suites/fdleak.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/suites/fdleak.sh b/test/suites/fdleak.sh index f7a4d43e88d7..2a17b3dbfd68 100644 --- a/test/suites/fdleak.sh +++ b/test/suites/fdleak.sh @@ -27,6 +27,9 @@ test_fdleak() { exit 0 ) + # Check for open handles to liblxc lxc.log files. + ! find "/proc/${pid}/fd" -ls | grep lxc.log || false + for i in $(seq 20); do afterfds=$(/bin/ls "/proc/${pid}/fd" | wc -l) leakedfds=$((afterfds - beforefds)) From 09dd9e156013325bc46b32387521caac245d79ee Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 12 Jun 2023 14:02:04 +0100 Subject: [PATCH 028/543] lxd/isntance/drivers/driver/lxc: SetFinalizer for clearing liblxc.Container reference once in initLXC Fixes file descriptor leak that is not cleared by clearing the gc. 
Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_lxc.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index a737e32cd3ac..607a9bd3c905 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -343,9 +343,6 @@ func lxcLoad(s *state.State, args db.InstanceArgs, p api.Project) (instance.Inst // Create the container struct d := lxcInstantiate(s, args, nil, p) - // Setup finalizer - runtime.SetFinalizer(d, lxcUnload) - // Expand config and devices err := d.(*lxc).expandConfig() if err != nil { @@ -357,7 +354,6 @@ func lxcLoad(s *state.State, args db.InstanceArgs, p api.Project) (instance.Inst // Unload is called by the garbage collector. func lxcUnload(d *lxc) { - runtime.SetFinalizer(d, nil) d.release() } @@ -426,7 +422,8 @@ type lxc struct { // Config handling. fromHook bool - cMu sync.Mutex + cMu sync.Mutex + cFinalizer sync.Once // Cached handles. // Do not use these variables directly, instead use their associated get functions so they @@ -651,6 +648,10 @@ func (d *lxc) initLXC(config bool) (*liblxc.Container, error) { return d.c, nil } + // As we are now going to be initialising a liblxc.Container reference, set the finalizer so that it is + // cleaned up (if needed) when the garbage collector destroys this instance struct. 
+ d.cFinalizer.Do(func() { runtime.SetFinalizer(d, lxcUnload) }) + revert := revert.New() defer revert.Fail() From 5f5691d86de278ef70506b5710aa2c6e2b3f471a Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Tue, 13 Jun 2023 10:05:42 +0100 Subject: [PATCH 029/543] lxd/storage/drivers/driver/zfs/volumes: Only delete volume on failure if not doing refresh in createVolumeFromMigrationOptimized Fixes #11822 Signed-off-by: Thomas Parrott --- lxd/storage/drivers/driver_zfs_volumes.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index a310446302fc..499e0139e8d9 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -1064,18 +1064,19 @@ func (d *zfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWriteCl } } + if !volTargetArgs.Refresh { + revert.Add(func() { + _ = d.DeleteVolume(vol, op) + }) + } + // Transfer the main volume. wrapper := migration.ProgressWriter(op, "fs_progress", vol.name) err = d.receiveDataset(vol, conn, wrapper) if err != nil { - _ = d.DeleteVolume(vol, op) return fmt.Errorf("Failed receiving volume %q: %w", vol.Name(), err) } - revert.Add(func() { - _ = d.DeleteVolume(vol, op) - }) - // Strip internal snapshots. 
entries, err := d.getDatasets(d.dataset(vol, false)) if err != nil { From 1591fb82d61477606f3966c30e826b51eabb49b6 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 14 Jun 2023 14:12:48 +0100 Subject: [PATCH 030/543] lxd/device/gpu/physical: Fix panic when GPU device doesn't have DRM support in startContainer Signed-off-by: Thomas Parrott --- lxd/device/gpu_physical.go | 60 ++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/lxd/device/gpu_physical.go b/lxd/device/gpu_physical.go index 443bf95194d8..ba7e3019dc1b 100644 --- a/lxd/device/gpu_physical.go +++ b/lxd/device/gpu_physical.go @@ -120,42 +120,44 @@ func (d *gpuPhysical) startContainer() (*deviceConfig.RunConfig, error) { found = true // Setup DRM unix-char devices if present. - if gpu.DRM.CardName != "" && gpu.DRM.CardDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.CardName)) { - path := filepath.Join(gpuDRIDevPath, gpu.DRM.CardName) - major, minor, err := d.deviceNumStringToUint32(gpu.DRM.CardDevice) - if err != nil { - return nil, err - } + if gpu.DRM != nil { + if gpu.DRM.CardName != "" && gpu.DRM.CardDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.CardName)) { + path := filepath.Join(gpuDRIDevPath, gpu.DRM.CardName) + major, minor, err := d.deviceNumStringToUint32(gpu.DRM.CardDevice) + if err != nil { + return nil, err + } - err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) - if err != nil { - return nil, err + err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) + if err != nil { + return nil, err + } } - } - if gpu.DRM.RenderName != "" && gpu.DRM.RenderDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.RenderName)) { - path := filepath.Join(gpuDRIDevPath, gpu.DRM.RenderName) - major, minor, err := 
d.deviceNumStringToUint32(gpu.DRM.RenderDevice) - if err != nil { - return nil, err - } + if gpu.DRM.RenderName != "" && gpu.DRM.RenderDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.RenderName)) { + path := filepath.Join(gpuDRIDevPath, gpu.DRM.RenderName) + major, minor, err := d.deviceNumStringToUint32(gpu.DRM.RenderDevice) + if err != nil { + return nil, err + } - err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) - if err != nil { - return nil, err + err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) + if err != nil { + return nil, err + } } - } - if gpu.DRM.ControlName != "" && gpu.DRM.ControlDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.ControlName)) { - path := filepath.Join(gpuDRIDevPath, gpu.DRM.ControlName) - major, minor, err := d.deviceNumStringToUint32(gpu.DRM.ControlDevice) - if err != nil { - return nil, err - } + if gpu.DRM.ControlName != "" && gpu.DRM.ControlDevice != "" && shared.PathExists(filepath.Join(gpuDRIDevPath, gpu.DRM.ControlName)) { + path := filepath.Join(gpuDRIDevPath, gpu.DRM.ControlName) + major, minor, err := d.deviceNumStringToUint32(gpu.DRM.ControlDevice) + if err != nil { + return nil, err + } - err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) - if err != nil { - return nil, err + err = unixDeviceSetupCharNum(d.state, d.inst.DevicesPath(), "unix", d.name, d.config, major, minor, path, false, &runConf) + if err != nil { + return nil, err + } } } From 605316702a137b9f5e1823c9a9466f8a6013d36e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 14 Jun 2023 17:36:08 +0200 Subject: [PATCH 031/543] lxd/device: Fix regression for not properly checking for GPU DRM information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/device/gpu.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/lxd/device/gpu.go b/lxd/device/gpu.go index 6c6424cad3db..e649bab7271b 100644 --- a/lxd/device/gpu.go +++ b/lxd/device/gpu.go @@ -71,8 +71,21 @@ func gpuValidationRules(requiredFields []string, optionalFields []string) map[st // Check if the device matches the given GPU card. // It matches based on vendorid, pci, productid or id setting of the device. func gpuSelected(device config.Device, gpu api.ResourcesGPUCard) bool { - return !((device["vendorid"] != "" && gpu.VendorID != device["vendorid"]) || - (device["pci"] != "" && gpu.PCIAddress != device["pci"]) || - (device["productid"] != "" && gpu.ProductID != device["productid"]) || - (device["id"] != "" && (gpu.DRM == nil || fmt.Sprintf("%d", gpu.DRM.ID) != device["id"]))) + if device["vendorid"] != "" && gpu.VendorID == device["vendorid"] { + return true + } + + if device["pci"] != "" && gpu.PCIAddress == device["pci"] { + return true + } + + if device["productid"] != "" && gpu.ProductID == device["productid"] { + return true + } + + if device["id"] != "" && gpu.DRM != nil && fmt.Sprintf("%d", gpu.DRM.ID) == device["id"] { + return true + } + + return false } From ea452b997a8fbbaea0118b89f524331af47bdee9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Wed, 14 Jun 2023 16:55:35 -0400 Subject: [PATCH 032/543] lxd-migrate: Fix SecureBoot handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #11829 Signed-off-by: Stéphane Graber --- lxd-migrate/main_migrate.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd-migrate/main_migrate.go b/lxd-migrate/main_migrate.go index 010caf4d2865..476ba69f8632 100644 --- a/lxd-migrate/main_migrate.go +++ b/lxd-migrate/main_migrate.go @@ -349,8 +349,8 @@ func (c *cmdMigrate) RunInteractive(server lxd.InstanceServer) 
(cmdMigrateData, return cmdMigrateData{}, err } - if hasSecureBoot { - config.InstanceArgs.Config["security.secureboot"] = "true" + if !hasSecureBoot { + config.InstanceArgs.Config["security.secureboot"] = "false" } } } From 0f267e22c59117155655394d45fd658c731df76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Wed, 19 Apr 2023 23:58:47 -0400 Subject: [PATCH 033/543] api: instances_state_total MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- doc/api-extensions.md | 3 +++ shared/version/api.go | 1 + 2 files changed, 4 insertions(+) diff --git a/doc/api-extensions.md b/doc/api-extensions.md index 4d7e7bdb16b7..1ac169f2e4a9 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -2046,3 +2046,6 @@ This also adds the `CRIUType_VM_QEMU` value of `3` for the migration `CRIUType` ## `auth_user` Add current user details to the main API endpoint. + +## `instances_state_total` +This extension adds a new `total` field to `InstanceStateDisk` and `InstanceStateMemory`, both part of the instance's state API. diff --git a/shared/version/api.go b/shared/version/api.go index 340b74bfb4db..e616263eaef4 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -346,6 +346,7 @@ var APIExtensions = []string{ "storage_pool_loop_resize", "migration_vm_live", "auth_user", + "instances_state_total", } // APIExtensionsCount returns the number of available API extensions. 
From 8839fcb17575ad690267f51f004ab0feb3af6229 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Tue, 16 May 2023 13:59:53 +0200 Subject: [PATCH 034/543] shared/api: New InstancePost attributes to handle instance rebuilding Signed-off-by: Gabriel Mougard --- shared/api/instance.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/shared/api/instance.go b/shared/api/instance.go index 47ab0707fc78..40c7d29c6a8d 100644 --- a/shared/api/instance.go +++ b/shared/api/instance.go @@ -167,6 +167,16 @@ type InstancePut struct { Description string `json:"description" yaml:"description"` } +// InstanceRebuildPost indicates how to rebuild an instance. +// +// swagger:model +// +// API extension: instances_rebuild. +type InstanceRebuildPost struct { + // Rebuild source + Source InstanceSource `json:"source" yaml:"source"` +} + // Instance represents a LXD instance. // // swagger:model From 6393509c3cc54b04ab7fa0e959583ab7b8eadefa Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Wed, 31 May 2023 20:13:07 +0200 Subject: [PATCH 035/543] lxd/client: New client method to handle instance rebuilding Signed-off-by: Gabriel Mougard --- client/interfaces.go | 2 + client/lxd_instances.go | 113 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 115 insertions(+) diff --git a/client/interfaces.go b/client/interfaces.go index 97c0d828366b..813e24a755b1 100644 --- a/client/interfaces.go +++ b/client/interfaces.go @@ -173,6 +173,8 @@ type InstanceServer interface { MigrateInstance(name string, instance api.InstancePost) (op Operation, err error) DeleteInstance(name string) (op Operation, err error) UpdateInstances(state api.InstancesPut, ETag string) (op Operation, err error) + RebuildInstance(instanceName string, req api.InstanceRebuildPost) (op Operation, err error) + RebuildInstanceFromImage(source ImageServer, image api.Image, instanceName string, req api.InstanceRebuildPost) (op RemoteOperation, err error) ExecInstance(instanceName string, exec 
api.InstanceExecPost, args *InstanceExecArgs) (op Operation, err error) ConsoleInstance(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (op Operation, err error) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 3bfef9092da9..cd52b8b8a9c5 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -211,6 +211,119 @@ func (r *ProtocolLXD) UpdateInstances(state api.InstancesPut, ETag string) (Oper return op, nil } +func (r *ProtocolLXD) rebuildInstance(instanceName string, instance api.InstanceRebuildPost) (Operation, error) { + path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) + if err != nil { + return nil, err + } + + // Send the request + op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/rebuild?project=%s", path, url.PathEscape(instanceName), r.project), instance, "") + if err != nil { + return nil, err + } + + return op, nil +} + +func (r *ProtocolLXD) tryRebuildInstance(instanceName string, req api.InstanceRebuildPost, urls []string, op Operation) (RemoteOperation, error) { + if len(urls) == 0 { + return nil, fmt.Errorf("The source server isn't listening on the network") + } + + rop := remoteOperation{ + chDone: make(chan bool), + } + + operation := req.Source.Operation + + // Forward targetOp to remote op + go func() { + success := false + var errors []remoteOperationResult + for _, serverURL := range urls { + if operation == "" { + req.Source.Server = serverURL + } else { + req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) + } + + op, err := r.rebuildInstance(instanceName, req) + if err != nil { + errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) + continue + } + + rop.handlerLock.Lock() + rop.targetOp = op + rop.handlerLock.Unlock() + + for _, handler := range rop.handlers { + _, _ = rop.targetOp.AddHandler(handler) + } + + err = rop.targetOp.Wait() + if err != nil { + errors = append(errors, 
remoteOperationResult{URL: serverURL, Error: err}) + if shared.IsConnectionError(err) { + continue + } + + break + } + + success = true + break + } + + if !success { + rop.err = remoteOperationError("Failed instance rebuild", errors) + if op != nil { + _ = op.Cancel() + } + } + + close(rop.chDone) + }() + + return &rop, nil +} + +// RebuildInstanceFromImage rebuilds an instance from an image. +func (r *ProtocolLXD) RebuildInstanceFromImage(source ImageServer, image api.Image, instanceName string, req api.InstanceRebuildPost) (RemoteOperation, error) { + info, err := r.getSourceImageConnectionInfo(source, image, &req.Source) + if err != nil { + return nil, err + } + + if info == nil { + op, err := r.rebuildInstance(instanceName, req) + if err != nil { + return nil, err + } + + rop := remoteOperation{ + targetOp: op, + chDone: make(chan bool), + } + + // Forward targetOp to remote op + go func() { + rop.err = rop.targetOp.Wait() + close(rop.chDone) + }() + + return &rop, nil + } + + return r.tryRebuildInstance(instanceName, req, info.Addresses, nil) +} + +// RebuildInstance rebuilds an instance as empty. +func (r *ProtocolLXD) RebuildInstance(instanceName string, instance api.InstanceRebuildPost) (op Operation, err error) { + return r.rebuildInstance(instanceName, instance) +} + // GetInstancesFull returns a list of instances including snapshots, backups and state. 
func (r *ProtocolLXD) GetInstancesFull(instanceType api.InstanceType) ([]api.InstanceFull, error) { instances := []api.InstanceFull{} From 7c6c411bd6686c869bdae6efc14638387dec2148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 15 Jun 2023 10:36:11 +0200 Subject: [PATCH 036/543] Revert "lxd/device: Fix regression for not properly checking for GPU DRM information" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/device/gpu.go | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/lxd/device/gpu.go b/lxd/device/gpu.go index e649bab7271b..6c6424cad3db 100644 --- a/lxd/device/gpu.go +++ b/lxd/device/gpu.go @@ -71,21 +71,8 @@ func gpuValidationRules(requiredFields []string, optionalFields []string) map[st // Check if the device matches the given GPU card. // It matches based on vendorid, pci, productid or id setting of the device. func gpuSelected(device config.Device, gpu api.ResourcesGPUCard) bool { - if device["vendorid"] != "" && gpu.VendorID == device["vendorid"] { - return true - } - - if device["pci"] != "" && gpu.PCIAddress == device["pci"] { - return true - } - - if device["productid"] != "" && gpu.ProductID == device["productid"] { - return true - } - - if device["id"] != "" && gpu.DRM != nil && fmt.Sprintf("%d", gpu.DRM.ID) == device["id"] { - return true - } - - return false + return !((device["vendorid"] != "" && gpu.VendorID != device["vendorid"]) || + (device["pci"] != "" && gpu.PCIAddress != device["pci"]) || + (device["productid"] != "" && gpu.ProductID != device["productid"]) || + (device["id"] != "" && (gpu.DRM == nil || fmt.Sprintf("%d", gpu.DRM.ID) != device["id"]))) } From 725f84f37de67713d653055f4e7629fe181c84b2 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 11:19:47 +0000 Subject: [PATCH 037/543] lxd/sys: Remove loading vhost_vsock module on init Signed-off-by: Din Music --- lxd/sys/os.go 
| 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/lxd/sys/os.go b/lxd/sys/os.go index 51adaade4093..4274c8718fcd 100644 --- a/lxd/sys/os.go +++ b/lxd/sys/os.go @@ -11,8 +11,6 @@ import ( "sync" "time" - "github.com/mdlayher/vsock" - "github.com/canonical/lxd/lxd/cgroup" "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/storage/filesystem" @@ -95,9 +93,6 @@ type OS struct { // LXC features LXCFeatures map[string]bool - // VM features - VsockID uint32 - // OS info ReleaseInfo map[string]string KernelVersion version.DottedVersion @@ -181,18 +176,6 @@ func (s *OS) Init() ([]cluster.Warning, error) { cgroup.Init() s.CGInfo = cgroup.GetInfo() - // Fill in the VsockID. - _ = util.LoadModule("vhost_vsock") - - vsockID, err := vsock.ContextID() - if err != nil || vsockID > 2147483647 { - // Fallback to the default ID for a host system if we're getting - // an error or are getting a clearly invalid value. - vsockID = 2 - } - - s.VsockID = vsockID - // Fill in the OS release info. osInfo, err := osarch.GetLSBRelease() if err != nil { From f544ab95fec72b08d9fc0fbf7813058feccc2049 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 11:33:32 +0000 Subject: [PATCH 038/543] lxd/instance/drivers: Get vsockID during qemu chectFeatures Signed-off-by: Din Music --- lxd/instance/drivers/driver_qemu.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 5a26352bc4a2..232058a02523 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -7254,7 +7254,15 @@ func (d *qemu) vsockID() int { // We then add the VM's own instance id (1 or higher) to give us a // unique, non-clashing context ID for our guest. 
- return int(d.state.OS.VsockID) + 1 + d.id + info := DriverStatuses()[instancetype.VM].Info + feature, found := info.Features["vhost_vsock"] + + vsockID, ok := feature.(int) + if !found || !ok { + vsockID = vsock.Host + } + + return vsockID + 1 + d.id } // InitPID returns the instance's current process ID. @@ -7765,6 +7773,14 @@ func (d *qemu) checkFeatures(hostArch int, qemuPath string) (map[string]any, err features["vhost_net"] = struct{}{} } + vsockID, err := vsock.ContextID() + if err != nil || vsockID > 2147483647 { + // Fallback to the default ID for a host system + features["vhost_vsock"] = vsock.Host + } else { + features["vhost_vsock"] = vsockID + } + return features, nil } From 3f0e61fb06fbcce6bcb0183c086fd8a04e36cc28 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Wed, 7 Jun 2023 11:02:38 +0200 Subject: [PATCH 039/543] shared/util: Add `StringPrefixInSlice(key string, list []string) bool` Signed-off-by: Gabriel Mougard --- shared/util.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/shared/util.go b/shared/util.go index d7b728924ad3..811b520d877f 100644 --- a/shared/util.go +++ b/shared/util.go @@ -631,6 +631,17 @@ func StringInSlice(key string, list []string) bool { return false } +// StringPrefixInSlice returns true if any element in the list has the given prefix. +func StringPrefixInSlice(key string, list []string) bool { + for _, entry := range list { + if strings.HasPrefix(entry, key) { + return true + } + } + + return false +} + // RemoveElementsFromStringSlice returns a slice equivalent to removing the given elements from the given list. // Elements not present in the list are ignored. 
func RemoveElementsFromStringSlice(list []string, elements ...string) []string { From 84d47bb4f3f111577904d223c62de30cff1aa1c5 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 5 Jun 2023 17:55:38 +0200 Subject: [PATCH 040/543] lxd/operations: Use `map[string][]api.URL` as resources Signed-off-by: Gabriel Mougard --- lxd/operations/operations.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lxd/operations/operations.go b/lxd/operations/operations.go index f11c53b72b1f..8e8c92b4220b 100644 --- a/lxd/operations/operations.go +++ b/lxd/operations/operations.go @@ -97,7 +97,7 @@ type Operation struct { updatedAt time.Time status api.StatusCode url string - resources map[string][]string + resources map[string][]api.URL metadata map[string]any err error readonly bool @@ -125,7 +125,7 @@ type Operation struct { // OperationCreate creates a new operation and returns it. If it cannot be // created, it returns an error. -func OperationCreate(s *state.State, projectName string, opClass OperationClass, opType operationtype.Type, opResources map[string][]string, opMetadata any, onRun func(*Operation) error, onCancel func(*Operation) error, onConnect func(*Operation, *http.Request, http.ResponseWriter) error, r *http.Request) (*Operation, error) { +func OperationCreate(s *state.State, projectName string, opClass OperationClass, opType operationtype.Type, opResources map[string][]api.URL, opMetadata any, onRun func(*Operation) error, onCancel func(*Operation) error, onConnect func(*Operation, *http.Request, http.ResponseWriter) error, r *http.Request) (*Operation, error) { // Don't allow new operations when LXD is shutting down. if s != nil && s.ShutdownCtx.Err() == context.Canceled { return nil, fmt.Errorf("LXD is shutting down") @@ -466,19 +466,20 @@ func (op *Operation) mayCancel() bool { // Returns URL of operation and operation info. 
func (op *Operation) Render() (string, *api.Operation, error) { // Setup the resource URLs + renderedResources := make(map[string][]string) resources := op.resources if resources != nil { tmpResources := make(map[string][]string) for key, value := range resources { var values []string for _, c := range value { - values = append(values, fmt.Sprintf("/%s/%s/%s", version.APIVersion, key, c)) + values = append(values, c.String()) } tmpResources[key] = values } - resources = tmpResources + renderedResources = tmpResources } // Local server name @@ -492,7 +493,7 @@ func (op *Operation) Render() (string, *api.Operation, error) { UpdatedAt: op.updatedAt, Status: op.status.String(), StatusCode: op.status, - Resources: resources, + Resources: renderedResources, Metadata: op.metadata, MayCancel: op.mayCancel(), } @@ -523,7 +524,7 @@ func (op *Operation) Wait(ctx context.Context) error { // UpdateResources updates the resources of the operation. It returns an error // if the operation is not pending or running, or the operation is read-only. -func (op *Operation) UpdateResources(opResources map[string][]string) error { +func (op *Operation) UpdateResources(opResources map[string][]api.URL) error { op.lock.Lock() if op.status != api.Pending && op.status != api.Running { op.lock.Unlock() @@ -649,7 +650,7 @@ func (op *Operation) URL() string { } // Resources returns the operation resources. 
-func (op *Operation) Resources() map[string][]string { +func (op *Operation) Resources() map[string][]api.URL { return op.resources } From 65ea6cf3fb8c6f370e7abf8124dbc620890d8cab Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Tue, 6 Jun 2023 19:27:41 +0200 Subject: [PATCH 041/543] lxd/operations: include project name in resource URL Signed-off-by: Gabriel Mougard --- lxd/operations/operations.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/operations/operations.go b/lxd/operations/operations.go index 8e8c92b4220b..a2b78d39c8f1 100644 --- a/lxd/operations/operations.go +++ b/lxd/operations/operations.go @@ -473,7 +473,7 @@ func (op *Operation) Render() (string, *api.Operation, error) { for key, value := range resources { var values []string for _, c := range value { - values = append(values, c.String()) + values = append(values, c.Project(op.Project()).String()) } tmpResources[key] = values From 0e7b5b881fa7b65d69f04260aabab298cf1864a0 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 5 Jun 2023 17:56:34 +0200 Subject: [PATCH 042/543] lxc: Use api.URL in resources passed to `OperationCreate` Signed-off-by: Gabriel Mougard --- lxc/export.go | 15 ++++++++++++--- lxc/storage_volume.go | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/lxc/export.go b/lxc/export.go index f11a8bbb8393..0515cdf0aff2 100644 --- a/lxc/export.go +++ b/lxc/export.go @@ -3,8 +3,9 @@ package main import ( "fmt" "io" + "net/url" "os" - "strings" + "path" "time" "github.com/spf13/cobra" @@ -107,8 +108,16 @@ func (c *cmdExport) Run(cmd *cobra.Command, args []string) error { } // Get name of backup - backupName := strings.TrimPrefix(op.Get().Resources["backups"][0], - "/1.0/backups/") + uStr := op.Get().Resources["backups"][0] + u, err := url.Parse(uStr) + if err != nil { + return fmt.Errorf("Invalid URL %q: %w", uStr, err) + } + + backupName, err := url.PathUnescape(path.Base(u.EscapedPath())) + if err != nil { + return 
fmt.Errorf("Invalid backup name segment in path %q: %w", u.EscapedPath(), err) + } defer func() { // Delete backup after we're done diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index 97efdb7cb1a0..3af7ed86d602 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -3,7 +3,9 @@ package main import ( "fmt" "io" + "net/url" "os" + "path" "sort" "strconv" "strings" @@ -2176,8 +2178,16 @@ func (c *cmdStorageVolumeExport) Run(cmd *cobra.Command, args []string) error { } // Get name of backup - backupName := strings.TrimPrefix(op.Get().Resources["backups"][0], - "/1.0/backups/") + uStr := op.Get().Resources["backups"][0] + u, err := url.Parse(uStr) + if err != nil { + return fmt.Errorf("Invalid URL %q: %w", uStr, err) + } + + backupName, err := url.PathUnescape(path.Base(u.EscapedPath())) + if err != nil { + return fmt.Errorf("Invalid backup name segment in path %q: %w", u.EscapedPath(), err) + } defer func() { // Delete backup after we're done From 273940d29604d7183fc27617ef5a711193ae22f2 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 5 Jun 2023 17:57:50 +0200 Subject: [PATCH 043/543] lxd: Use api.URL in resources passed to `OperationCreate` Signed-off-by: Gabriel Mougard --- lxd/api_cluster.go | 12 ++++++------ lxd/images.go | 10 +++++----- lxd/instance_backup.go | 21 +++++++++++++-------- lxd/instance_console.go | 5 +++-- lxd/instance_delete.go | 6 ++++-- lxd/instance_exec.go | 8 ++++---- lxd/instance_post.go | 25 +++++++++++++------------ lxd/instance_put.go | 5 +++-- lxd/instance_snapshot.go | 32 ++++++++++++++++++-------------- lxd/instance_state.go | 5 +++-- lxd/instances_post.go | 24 ++++++++++++------------ lxd/instances_put.go | 8 ++++++-- lxd/storage_volumes.go | 12 ++++++------ lxd/storage_volumes_backup.go | 19 ++++++++++--------- lxd/storage_volumes_snapshot.go | 13 +++++++------ 15 files changed, 113 insertions(+), 92 deletions(-) diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go index 3e08e0f25fa6..863e1af25791 100644 
--- a/lxd/api_cluster.go +++ b/lxd/api_cluster.go @@ -371,8 +371,8 @@ func clusterPutBootstrap(d *Daemon, r *http.Request, req api.ClusterPut) respons return nil } - resources := map[string][]string{} - resources["cluster"] = []string{} + resources := map[string][]api.URL{} + resources["cluster"] = []api.URL{} // If there's no cluster.https_address set, but core.https_address is, // let's default to it. @@ -799,8 +799,8 @@ func clusterPutJoin(d *Daemon, r *http.Request, req api.ClusterPut) response.Res return nil } - resources := map[string][]string{} - resources["cluster"] = []string{} + resources := map[string][]api.URL{} + resources["cluster"] = []api.URL{} op, err := operations.OperationCreate(s, "", operations.OperationClassTask, operationtype.ClusterJoin, resources, nil, run, nil, nil, r) if err != nil { @@ -1364,8 +1364,8 @@ func clusterNodesPost(d *Daemon, r *http.Request) response.Response { "expiresAt": expiry, } - resources := map[string][]string{} - resources["cluster"] = []string{} + resources := map[string][]api.URL{} + resources["cluster"] = []api.URL{} op, err := operations.OperationCreate(s, project.Default, operations.OperationClassToken, operationtype.ClusterJoinToken, resources, meta, nil, nil, nil, r) if err != nil { diff --git a/lxd/images.go b/lxd/images.go index 225acbe5964a..fa0210ebabd0 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -2537,8 +2537,8 @@ func imageDelete(d *Daemon, r *http.Request) response.Response { return nil } - resources := map[string][]string{} - resources["images"] = []string{imgInfo.Fingerprint} + resources := map[string][]api.URL{} + resources["images"] = []api.URL{*api.NewURL().Path(version.APIVersion, "images", imgInfo.Fingerprint)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.ImageDelete, resources, nil, do, nil, nil, r) if err != nil { @@ -2602,7 +2602,7 @@ func imageValidSecret(s *state.State, r *http.Request, projectName string, finge continue } - if 
!shared.StringInSlice(fmt.Sprintf("/1.0/images/%s", fingerprint), opImages) { + if !shared.StringPrefixInSlice(api.NewURL().Path(version.APIVersion, "images", fingerprint).String(), opImages) { continue } @@ -4303,8 +4303,8 @@ func createTokenResponse(s *state.State, r *http.Request, projectName string, fi meta["secret"] = secret - resources := map[string][]string{} - resources["images"] = []string{fingerprint} + resources := map[string][]api.URL{} + resources["images"] = []api.URL{*api.NewURL().Path(version.APIVersion, "images", fingerprint)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassToken, operationtype.ImageToken, resources, meta, nil, nil, nil, r) if err != nil { diff --git a/lxd/instance_backup.go b/lxd/instance_backup.go index e11497003c86..29f2d9cf3b2f 100644 --- a/lxd/instance_backup.go +++ b/lxd/instance_backup.go @@ -335,14 +335,14 @@ func instanceBackupsPost(d *Daemon, r *http.Request) response.Response { return nil } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] } - resources["backups"] = []string{req.Name} + resources["backups"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name, "backups", req.Name)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.BackupCreate, resources, nil, backup, nil, nil, r) @@ -528,9 +528,11 @@ func instanceBackupPost(d *Daemon, r *http.Request) response.Response { return nil } - resources := map[string][]string{} - resources["instances"] = []string{name} - resources["containers"] = resources["instances"] + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} + if instanceType == 
instancetype.Container { + resources["containers"] = resources["instances"] + } op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.BackupRename, resources, nil, rename, nil, nil, r) @@ -615,8 +617,11 @@ func instanceBackupDelete(d *Daemon, r *http.Request) response.Response { return nil } - resources := map[string][]string{} - resources["container"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} + if instanceType == instancetype.Container { + resources["containers"] = resources["instances"] + } op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.BackupRemove, resources, nil, remove, nil, nil, r) diff --git a/lxd/instance_console.go b/lxd/instance_console.go index 72d9c18bfc77..a7d2849c0ec5 100644 --- a/lxd/instance_console.go +++ b/lxd/instance_console.go @@ -27,6 +27,7 @@ import ( "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" + "github.com/canonical/lxd/shared/version" "github.com/canonical/lxd/shared/ws" ) @@ -505,8 +506,8 @@ func instanceConsolePost(d *Daemon, r *http.Request) response.Response { ws.height = post.Height ws.protocol = post.Type - resources := map[string][]string{} - resources["instances"] = []string{ws.instance.Name()} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", ws.instance.Name())} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] diff --git a/lxd/instance_delete.go b/lxd/instance_delete.go index 6b0427b09bef..0f22c710ad03 100644 --- a/lxd/instance_delete.go +++ b/lxd/instance_delete.go @@ -13,6 +13,8 @@ import ( "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/lxd/response" "github.com/canonical/lxd/shared" + 
"github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/version" ) // swagger:operation DELETE /1.0/instances/{name} instances instance_delete @@ -85,8 +87,8 @@ func instanceDelete(d *Daemon, r *http.Request) response.Response { return inst.Delete(false) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] diff --git a/lxd/instance_exec.go b/lxd/instance_exec.go index 96d701287265..f181358d2740 100644 --- a/lxd/instance_exec.go +++ b/lxd/instance_exec.go @@ -661,8 +661,8 @@ func instanceExecPost(d *Daemon, r *http.Request) response.Response { ws.instance = inst ws.req = post - resources := map[string][]string{} - resources["instances"] = []string{ws.instance.Name()} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", ws.instance.Name())} if ws.instance.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -730,8 +730,8 @@ func instanceExecPost(d *Daemon, r *http.Request) response.Response { return nil } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] diff --git a/lxd/instance_post.go b/lxd/instance_post.go index 41cb54efe2ee..bcecadc6bdeb 100644 --- a/lxd/instance_post.go +++ b/lxd/instance_post.go @@ -25,6 +25,7 @@ import ( storagePools "github.com/canonical/lxd/lxd/storage" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/version" ) // swagger:operation POST /1.0/instances/{name} 
instances instance_post @@ -255,8 +256,8 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return instancePostPoolMigration(s, inst, req.Name, req.InstanceOnly, req.Pool, req.Live, req.AllowInconsistent, op) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.InstanceMigrate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) @@ -277,8 +278,8 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return instancePostProjectMigration(s, inst, req.Name, req.Project, req.InstanceOnly, req.Live, req.AllowInconsistent, op) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.InstanceMigrate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) @@ -303,8 +304,8 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return migrateInstance(s, r, inst, targetNode, req, op) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -324,8 +325,8 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return response.InternalError(err) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = 
[]api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -369,8 +370,8 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return inst.Rename(req.Name, true) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -597,8 +598,8 @@ func instancePostClusteringMigrate(s *state.State, r *http.Request, srcPool stor dest = dest.UseTarget(newMember.Name).UseProject(projectName) - resources := map[string][]string{} - resources["instances"] = []string{srcInstName} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", srcInstName)} srcInstRunning := srcInst.IsRunning() live := stateful && srcInstRunning diff --git a/lxd/instance_put.go b/lxd/instance_put.go index 428f8be929cb..cc8e9e1857c0 100644 --- a/lxd/instance_put.go +++ b/lxd/instance_put.go @@ -24,6 +24,7 @@ import ( "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/osarch" + "github.com/canonical/lxd/shared/version" ) // swagger:operation PUT /1.0/instances/{name} instances instance_put @@ -169,8 +170,8 @@ func instancePut(d *Daemon, r *http.Request) response.Response { opType = operationtype.SnapshotRestore } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] diff --git a/lxd/instance_snapshot.go b/lxd/instance_snapshot.go index 7efebecef582..7bbbb46d6089 100644 --- 
a/lxd/instance_snapshot.go +++ b/lxd/instance_snapshot.go @@ -324,8 +324,9 @@ func instanceSnapshotsPost(d *Daemon, r *http.Request) response.Response { return inst.Snapshot(req.Name, expiry, req.Stateful) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} + resources["instances_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name, "snapshots", req.Name)} if inst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -523,11 +524,11 @@ func snapshotPut(s *state.State, r *http.Request, snapInst instance.Instance) re } opType := operationtype.SnapshotUpdate + parentName, snapName, _ := api.GetParentAndSnapshotName(snapInst.Name()) - parentName, _, _ := api.GetParentAndSnapshotName(snapInst.Name()) - - resources := map[string][]string{} - resources["instances"] = []string{parentName} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName)} + resources["instances_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName, "snapshots", snapName)} if snapInst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -643,7 +644,7 @@ func snapshotPost(s *state.State, r *http.Request, snapInst instance.Instance) r return response.BadRequest(err) } - parentName, _, _ := api.GetParentAndSnapshotName(snapInst.Name()) + parentName, snapName, _ := api.GetParentAndSnapshotName(snapInst.Name()) migration, err := raw.GetBool("migration") if err == nil && migration { @@ -677,8 +678,9 @@ func snapshotPost(s *state.State, r *http.Request, snapInst instance.Instance) r return response.SmartError(err) } - resources := map[string][]string{} - resources["instances"] = []string{parentName} + resources := map[string][]api.URL{} + 
resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName)} + resources["instances_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName, "snapshots", snapName)} if snapInst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -730,8 +732,9 @@ func snapshotPost(s *state.State, r *http.Request, snapInst instance.Instance) r return snapInst.Rename(fullName, false) } - resources := map[string][]string{} - resources["instances"] = []string{parentName} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName)} + resources["instances_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName, "snapshots", snapName)} if snapInst.Type() == instancetype.Container { resources["containers"] = resources["instances"] @@ -776,10 +779,11 @@ func snapshotDelete(s *state.State, r *http.Request, snapInst instance.Instance) return snapInst.Delete(false) } - parentName, _, _ := api.GetParentAndSnapshotName(snapInst.Name()) + parentName, snapName, _ := api.GetParentAndSnapshotName(snapInst.Name()) - resources := map[string][]string{} - resources["instances"] = []string{parentName} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName)} + resources["instances_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", parentName, "snapshots", snapName)} if snapInst.Type() == instancetype.Container { resources["containers"] = resources["instances"] diff --git a/lxd/instance_state.go b/lxd/instance_state.go index 97d098ee07a0..552cefe1791c 100644 --- a/lxd/instance_state.go +++ b/lxd/instance_state.go @@ -16,6 +16,7 @@ import ( "github.com/canonical/lxd/lxd/response" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + 
"github.com/canonical/lxd/shared/version" ) // swagger:operation GET /1.0/instances/{name}/state instances instance_state_get @@ -199,8 +200,8 @@ func instanceStatePut(d *Daemon, r *http.Request) response.Response { return doInstanceStatePut(inst, req) } - resources := map[string][]string{} - resources["instances"] = []string{name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", name)} op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, opType, resources, nil, do, nil, nil, r) if err != nil { return response.InternalError(err) diff --git a/lxd/instances_post.go b/lxd/instances_post.go index 8a46e1b68325..cc2f5f026228 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -33,6 +33,7 @@ import ( "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/osarch" + "github.com/canonical/lxd/shared/version" ) func ensureDownloadedImageFitWithinBudget(s *state.State, r *http.Request, op *operations.Operation, p api.Project, img *api.Image, imgAlias string, source api.InstanceSource, imgType string) (*api.Image, error) { @@ -118,8 +119,8 @@ func createFromImage(s *state.State, r *http.Request, p api.Project, profiles [] return instanceCreateFromImage(s, r, img, args, op) } - resources := map[string][]string{} - resources["instances"] = []string{req.Name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", req.Name)} if dbType == instancetype.Container { resources["containers"] = resources["instances"] @@ -168,8 +169,8 @@ func createFromNone(s *state.State, r *http.Request, projectName string, profile return err } - resources := map[string][]string{} - resources["instances"] = []string{req.Name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", 
req.Name)} if dbType == instancetype.Container { resources["containers"] = resources["instances"] @@ -371,8 +372,8 @@ func createFromMigration(s *state.State, r *http.Request, projectName string, pr return nil } - resources := map[string][]string{} - resources["instances"] = []string{req.Name} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", req.Name)} if dbType == instancetype.Container { resources["containers"] = resources["instances"] @@ -541,8 +542,8 @@ func createFromCopy(s *state.State, r *http.Request, projectName string, profile return nil } - resources := map[string][]string{} - resources["instances"] = []string{req.Name, req.Source.Source} + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", req.Name), *api.NewURL().Path(version.APIVersion, "instances", req.Source.Source)} if dbType == instancetype.Container { resources["containers"] = resources["instances"] @@ -728,9 +729,8 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data return nil } - resources := map[string][]string{} - resources["instances"] = []string{bInfo.Name} - resources["containers"] = resources["instances"] + resources := map[string][]api.URL{} + resources["instances"] = []api.URL{*api.NewURL().Path(version.APIVersion, "instances", bInfo.Name)} op, err := operations.OperationCreate(s, bInfo.Project, operations.OperationClassTask, operationtype.BackupRestore, resources, nil, run, nil, nil, r) if err != nil { @@ -1297,7 +1297,7 @@ func clusterCopyContainerInternal(s *state.State, r *http.Request, source instan req.Source.Type = "migration" req.Source.Certificate = string(s.Endpoints.NetworkCert().PublicKey()) req.Source.Mode = "pull" - req.Source.Operation = fmt.Sprintf("https://%s/1.0/operations/%s", nodeAddress, opAPI.ID) + req.Source.Operation = fmt.Sprintf("https://%s/%s/operations/%s", nodeAddress, 
version.APIVersion, opAPI.ID) req.Source.Websockets = websockets req.Source.Source = "" req.Source.Project = "" diff --git a/lxd/instances_put.go b/lxd/instances_put.go index f96995f88cfa..b50172bc2afb 100644 --- a/lxd/instances_put.go +++ b/lxd/instances_put.go @@ -16,6 +16,7 @@ import ( "github.com/canonical/lxd/lxd/response" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/version" ) func coalesceErrors(local bool, errors map[string]error) error { @@ -256,8 +257,11 @@ func instancesPut(d *Daemon, r *http.Request) response.Response { return coalesceErrors(true, failures) } - resources := map[string][]string{} - resources["instances"] = names + resources := map[string][]api.URL{} + for _, instName := range names { + resources["instances"] = append(resources["instances"], *api.NewURL().Path(version.APIVersion, "instances", instName)) + } + op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, opType, resources, nil, do, nil, nil, r) if err != nil { return response.InternalError(err) diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go index 6a838695b741..ce564140b7db 100644 --- a/lxd/storage_volumes.go +++ b/lxd/storage_volumes.go @@ -889,8 +889,8 @@ func doVolumeMigration(s *state.State, r *http.Request, requestProjectName strin return response.InternalError(err) } - resources := map[string][]string{} - resources["storage_volumes"] = []string{fmt.Sprintf("%s/volumes/custom/%s", poolName, req.Name)} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", "custom", req.Name)} run := func(op *operations.Operation) error { // And finally run the migration. 
@@ -1159,8 +1159,8 @@ func storagePoolVolumeTypePostMigration(state *state.State, r *http.Request, req return response.InternalError(err) } - resources := map[string][]string{} - resources["storage_volumes"] = []string{fmt.Sprintf("%s/volumes/custom/%s", poolName, volumeName)} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", "custom", volumeName)} run := func(op *operations.Operation) error { return ws.DoStorage(state, projectName, poolName, volumeName, op) @@ -1992,8 +1992,8 @@ func createStoragePoolVolumeFromBackup(s *state.State, r *http.Request, requestP return nil } - resources := map[string][]string{} - resources["storage_volumes"] = []string{bInfo.Name} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", bInfo.Pool, "volumes", string(bInfo.Type), bInfo.Name)} op, err := operations.OperationCreate(s, requestProjectName, operations.OperationClassTask, operationtype.CustomVolumeBackupRestore, resources, nil, run, nil, nil, r) if err != nil { diff --git a/lxd/storage_volumes_backup.go b/lxd/storage_volumes_backup.go index c02790714a39..3df8222bffb7 100644 --- a/lxd/storage_volumes_backup.go +++ b/lxd/storage_volumes_backup.go @@ -216,8 +216,7 @@ func storagePoolVolumeTypeCustomBackupsGet(d *Daemon, r *http.Request) response. 
for _, backup := range backups { if !recursion { - url := fmt.Sprintf("/%s/storage-pools/%s/volumes/custom/%s/backups/%s", - version.APIVersion, poolName, volumeName, strings.Split(backup.Name(), "/")[1]) + url := api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", "custom", volumeName, "backups", strings.Split(backup.Name(), "/")[1]).String() resultString = append(resultString, url) } else { render := backup.Render() @@ -424,9 +423,9 @@ func storagePoolVolumeTypeCustomBackupsPost(d *Daemon, r *http.Request) response return nil } - resources := map[string][]string{} - resources["storage_volumes"] = []string{volumeName} - resources["backups"] = []string{req.Name} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName)} + resources["backups"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "backups", req.Name)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.CustomVolumeBackupCreate, resources, nil, backup, nil, nil, r) if err != nil { @@ -665,8 +664,9 @@ func storagePoolVolumeTypeCustomBackupPost(d *Daemon, r *http.Request) response. 
return nil } - resources := map[string][]string{} - resources["volume"] = []string{volumeName} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName)} + resources["backups"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "backups", oldName)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.CustomVolumeBackupRename, resources, nil, rename, nil, nil, r) if err != nil { @@ -778,8 +778,9 @@ func storagePoolVolumeTypeCustomBackupDelete(d *Daemon, r *http.Request) respons return nil } - resources := map[string][]string{} - resources["volume"] = []string{volumeName} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName)} + resources["backups"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "backups", backupName)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.CustomVolumeBackupRemove, resources, nil, remove, nil, nil, r) if err != nil { diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go index 38cd977b6495..d5ef17403ff5 100644 --- a/lxd/storage_volumes_snapshot.go +++ b/lxd/storage_volumes_snapshot.go @@ -229,8 +229,9 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) response.Res return pool.CreateCustomVolumeSnapshot(projectName, volumeName, req.Name, expiry, op) } - resources := map[string][]string{} - resources["storage_volumes"] = []string{volumeName} + resources := map[string][]api.URL{} + resources["storage_volumes"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", 
volumeTypeName, volumeName)} + resources["storage_volume_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "snapshots", req.Name)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.VolumeSnapshotCreate, resources, nil, snapshot, nil, nil, r) if err != nil { @@ -558,8 +559,8 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) response.Resp return pool.RenameCustomVolumeSnapshot(projectName, fullSnapshotName, req.Name, op) } - resources := map[string][]string{} - resources["storage_volume_snapshots"] = []string{volumeName} + resources := map[string][]api.URL{} + resources["storage_volume_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "snapshots", snapshotName)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.VolumeSnapshotRename, resources, nil, snapshotRename, nil, nil, r) if err != nil { @@ -1085,8 +1086,8 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) response.Re return pool.DeleteCustomVolumeSnapshot(projectName, fullSnapshotName, op) } - resources := map[string][]string{} - resources["storage_volume_snapshots"] = []string{volumeName} + resources := map[string][]api.URL{} + resources["storage_volume_snapshots"] = []api.URL{*api.NewURL().Path(version.APIVersion, "storage-pools", poolName, "volumes", volumeTypeName, volumeName, "snapshots", snapshotName)} op, err := operations.OperationCreate(s, projectParam(r), operations.OperationClassTask, operationtype.VolumeSnapshotDelete, resources, nil, snapshotDelete, nil, nil, r) if err != nil { From ebc4d4a4e3886b6a328cffce1416a4002bbb0f00 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Tue, 13 Jun 2023 14:24:20 +0200 Subject: [PATCH 044/543] lxd: replace "1.0" API version by `version.APIVersion` Signed-off-by: 
Gabriel Mougard --- lxd/instance_console.go | 2 +- lxd/instance_exec.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/instance_console.go b/lxd/instance_console.go index a7d2849c0ec5..676d137d3d1d 100644 --- a/lxd/instance_console.go +++ b/lxd/instance_console.go @@ -446,7 +446,7 @@ func instanceConsolePost(d *Daemon, r *http.Request) response.Response { } if client != nil { - url := api.NewURL().Path("1.0", "instances", name, "console").Project(projectName) + url := api.NewURL().Path(version.APIVersion, "instances", name, "console").Project(projectName) resp, _, err := client.RawQuery("POST", url.String(), post, "") if err != nil { return response.SmartError(err) diff --git a/lxd/instance_exec.go b/lxd/instance_exec.go index f181358d2740..b3018bf6c01e 100644 --- a/lxd/instance_exec.go +++ b/lxd/instance_exec.go @@ -547,7 +547,7 @@ func instanceExecPost(d *Daemon, r *http.Request) response.Response { } if client != nil { - url := api.NewURL().Path("1.0", "instances", name, "exec").Project(projectName) + url := api.NewURL().Path(version.APIVersion, "instances", name, "exec").Project(projectName) resp, _, err := client.RawQuery("POST", url.String(), post, "") if err != nil { return response.SmartError(err) From f83b4fa41ff8582f75d3b0c20fed60bc1877ea42 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 5 Jun 2023 17:58:16 +0200 Subject: [PATCH 045/543] lxd-agent: Use api.URL in resources passed to `OperationCreate` Signed-off-by: Gabriel Mougard --- lxd-agent/exec.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd-agent/exec.go b/lxd-agent/exec.go index fb3b37420305..ab8be9f05748 100644 --- a/lxd-agent/exec.go +++ b/lxd-agent/exec.go @@ -134,7 +134,7 @@ func execPost(d *Daemon, r *http.Request) response.Response { ws.uid = post.User ws.gid = post.Group - resources := map[string][]string{} + resources := map[string][]api.URL{} op, err := operations.OperationCreate(nil, "", operations.OperationClassWebsocket, 
operationtype.CommandExec, resources, ws.Metadata(), ws.Do, nil, ws.Connect, r) if err != nil { From 8db6b535fe7c7fa5a5ffc0ce6610b9b53a9a8772 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Thu, 15 Jun 2023 10:46:31 -0400 Subject: [PATCH 046/543] Makefile: ensure that update-po fails on any error Normally, make aborts on error bug the for loop is considered as one command and the `rm -f` part always succeeds masking any issue `msgmerge` could have. Signed-off-by: Simon Deziel --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index b7a05da34550..cb99b5207e27 100644 --- a/Makefile +++ b/Makefile @@ -228,6 +228,7 @@ po/%.po: po/$(DOMAIN).pot .PHONY: update-po update-po: + set -eu; \ for lang in $(LINGUAS); do\ msgmerge -U $$lang.po po/$(DOMAIN).pot; \ rm -f $$lang.po~; \ From 7f5230f1b33f981ca7d05bee3d9b203affce934c Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Thu, 15 Jun 2023 10:49:33 -0400 Subject: [PATCH 047/543] Makefile: tell msgmerge to not create backups that we delete afterward Signed-off-by: Simon Deziel --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index cb99b5207e27..1ba4ad9c7713 100644 --- a/Makefile +++ b/Makefile @@ -230,8 +230,7 @@ po/%.po: po/$(DOMAIN).pot update-po: set -eu; \ for lang in $(LINGUAS); do\ - msgmerge -U $$lang.po po/$(DOMAIN).pot; \ - rm -f $$lang.po~; \ + msgmerge --backup=none -U $$lang.po po/$(DOMAIN).pot; \ done .PHONY: update-pot From 9b21569ce5cd2083a5f0c642960944d48eafd500 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 15 Jun 2023 11:59:00 +0100 Subject: [PATCH 048/543] lxd/storage/drivers/driver/btrfs/utils: Don't fail on failure to set subvolume readonly during delete If it is a problem then the actual delete will still fail. 
Signed-off-by: Thomas Parrott --- lxd/storage/drivers/driver_btrfs_utils.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go index d0b8da581589..e08cf56462fb 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -187,9 +187,10 @@ func (d *btrfs) deleteSubvolume(rootPath string, recursion bool) error { return err } + // Try and ensure volume is writable to possibility of destroy failing. err := d.setSubvolumeReadonlyProperty(rootPath, false) if err != nil { - return fmt.Errorf("Failed setting subvolume writable %q: %w", rootPath, err) + d.logger.Warn("Failed setting subvolume writable", logger.Ctx{"path": rootPath, "err": err}) } // Attempt to delete the root subvol itself (short path). @@ -212,7 +213,7 @@ func (d *btrfs) deleteSubvolume(rootPath string, recursion bool) error { subSubVolPath := filepath.Join(rootPath, subSubVol) err = d.setSubvolumeReadonlyProperty(subSubVolPath, false) if err != nil { - return fmt.Errorf("Failed setting subvolume writable %q: %w", subSubVolPath, err) + d.logger.Warn("Failed setting subvolume writable", logger.Ctx{"path": subSubVolPath, "err": err}) } } From 7829eecc8b640fc25b2a9c5e429605aab4af7daf Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 15 Jun 2023 13:34:52 +0100 Subject: [PATCH 049/543] lxd/storage/drivers/driver/btrfs/utils: Don't try and delete subvolume twice if failed first time and recursion not enabled Signed-off-by: Thomas Parrott --- lxd/storage/drivers/driver_btrfs_utils.go | 47 ++++++++++++----------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go index e08cf56462fb..6b7447d28772 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -197,34 +197,35 @@ func (d *btrfs) deleteSubvolume(rootPath 
string, recursion bool) error { err = destroy(rootPath) if err == nil { return nil + } else if !recursion { + return fmt.Errorf("Failed deleting subvolume %q: %w", rootPath, err) } - // Delete subsubvols. - if recursion { - // Get the subvolumes list. - subSubVols, err := d.getSubvolumes(rootPath) - if err != nil { - return err - } + // Delete subsubvols as recursion enabled. - // Perform a first pass and ensure all sub volumes are writable. - sort.Sort(sort.StringSlice(subSubVols)) - for _, subSubVol := range subSubVols { - subSubVolPath := filepath.Join(rootPath, subSubVol) - err = d.setSubvolumeReadonlyProperty(subSubVolPath, false) - if err != nil { - d.logger.Warn("Failed setting subvolume writable", logger.Ctx{"path": subSubVolPath, "err": err}) - } + // Get the subvolumes list. + subSubVols, err := d.getSubvolumes(rootPath) + if err != nil { + return err + } + + // Perform a first pass and ensure all sub volumes are writable. + sort.Sort(sort.StringSlice(subSubVols)) + for _, subSubVol := range subSubVols { + subSubVolPath := filepath.Join(rootPath, subSubVol) + err = d.setSubvolumeReadonlyProperty(subSubVolPath, false) + if err != nil { + d.logger.Warn("Failed setting subvolume writable", logger.Ctx{"path": subSubVolPath, "err": err}) } + } - // Perform a second pass to delete subvolumes. - sort.Sort(sort.Reverse(sort.StringSlice(subSubVols))) - for _, subSubVol := range subSubVols { - subSubVolPath := filepath.Join(rootPath, subSubVol) - err := destroy(subSubVolPath) - if err != nil { - return fmt.Errorf("Failed deleting subvolume %q: %w", subSubVolPath, err) - } + // Perform a second pass to delete subvolumes. 
+ sort.Sort(sort.Reverse(sort.StringSlice(subSubVols))) + for _, subSubVol := range subSubVols { + subSubVolPath := filepath.Join(rootPath, subSubVol) + err := destroy(subSubVolPath) + if err != nil { + return fmt.Errorf("Failed deleting subvolume %q: %w", subSubVolPath, err) } } From 4cc3e0a464e827503208d7d779aedafdd89b1fdd Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 15 Jun 2023 15:12:52 +0100 Subject: [PATCH 050/543] test: Add debug logging for clustering events tests Trying to work out why they intermittently fail. Signed-off-by: Thomas Parrott --- test/suites/clustering.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index 70e5ca86159c..979eda4eaaab 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -3539,6 +3539,7 @@ test_clustering_events() { # Check events were distributed. for i in 1 2 3; do + cat "${TEST_DIR}/node${i}.log" grep -Fc "instance-restarted" "${TEST_DIR}/node${i}.log" | grep -Fx 6 done @@ -3571,6 +3572,7 @@ test_clustering_events() { grep -Fc "instance-restarted" "${TEST_DIR}/node1.log" grep -Fc "instance-restarted" "${TEST_DIR}/node1.log" | grep -Fx 7 for i in 2 3; do + cat "${TEST_DIR}/node${i}.log" grep -Fc "instance-restarted" "${TEST_DIR}/node${i}.log" | grep -Fx 6 done From c5c98de4a46449a227e61ed4ae735adebdabd701 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:03:07 +0000 Subject: [PATCH 051/543] lxd/storage/drivers/btrfs: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_btrfs.go | 42 ++++++++++++++++++----------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go index d41f47c6a312..26101adf921a 100644 --- a/lxd/storage/drivers/driver_btrfs.go +++ b/lxd/storage/drivers/driver_btrfs.go @@ -103,6 +103,27 @@ func (d *btrfs) Info() Info { } } +// FillConfig populates the storage pool's configuration file with the 
default values. +func (d *btrfs) FillConfig() error { + loopPath := loopFilePath(d.name) + if d.config["source"] == "" || d.config["source"] == loopPath { + // Pick a default size of the loop file if not specified. + if d.config["size"] == "" { + defaultSize, err := loopFileSizeDefault() + if err != nil { + return err + } + + d.config["size"] = fmt.Sprintf("%dGiB", defaultSize) + } + } else { + // Unset size property since it's irrelevant. + d.config["size"] = "" + } + + return nil +} + // Create is called during pool creation and is effectively using an empty driver struct. // WARNING: The Create() function cannot rely on any of the struct attributes being set. func (d *btrfs) Create() error { @@ -112,21 +133,16 @@ func (d *btrfs) Create() error { revert := revert.New() defer revert.Fail() + err := d.FillConfig() + if err != nil { + return err + } + loopPath := loopFilePath(d.name) if d.config["source"] == "" || d.config["source"] == loopPath { // Create a loop based pool. d.config["source"] = loopPath - // Pick a default size of the loop file if not specified. - if d.config["size"] == "" { - defaultSize, err := loopFileSizeDefault() - if err != nil { - return err - } - - d.config["size"] = fmt.Sprintf("%dGiB", defaultSize) - } - // Create the loop file itself. size, err := units.ParseByteSizeString(d.config["size"]) if err != nil { @@ -146,9 +162,6 @@ func (d *btrfs) Create() error { return fmt.Errorf("Failed to format sparse file: %w", err) } } else if shared.IsBlockdevPath(d.config["source"]) { - // Unset size property since it's irrelevant. - d.config["size"] = "" - // Wipe if requested. if shared.IsTrue(d.config["source.wipe"]) { err := wipeBlockHeaders(d.config["source"]) @@ -177,9 +190,6 @@ func (d *btrfs) Create() error { d.config["source"] = devUUID } } else if d.config["source"] != "" { - // Unset size property since it's irrelevant. 
- d.config["size"] = "" - hostPath := shared.HostPath(d.config["source"]) if d.isSubvolume(hostPath) { // Existing btrfs subvolume. From cafab145da7fea7694553ab474f2d43d60135bbe Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:03:22 +0000 Subject: [PATCH 052/543] lxd/storage/drivers/ceph: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_ceph.go | 39 ++++++++++++++++++------------ 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/lxd/storage/drivers/driver_ceph.go b/lxd/storage/drivers/driver_ceph.go index d5385ff7e747..84b44bd76c5d 100644 --- a/lxd/storage/drivers/driver_ceph.go +++ b/lxd/storage/drivers/driver_ceph.go @@ -95,15 +95,8 @@ func (d *ceph) getPlaceholderVolume() Volume { return NewVolume(d, d.name, VolumeType("lxd"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil) } -// Create is called during pool creation and is effectively using an empty driver struct. -// WARNING: The Create() function cannot rely on any of the struct attributes being set. -func (d *ceph) Create() error { - revert := revert.New() - defer revert.Fail() - - d.config["volatile.initial_source"] = d.config["source"] - - // Set default properties if missing. +// FillConfig populates the storage pool's configuration file with the default values. +func (d *ceph) FillConfig() error { if d.config["ceph.cluster_name"] == "" { d.config["ceph.cluster_name"] = CephDefaultCluster } @@ -114,12 +107,28 @@ func (d *ceph) Create() error { if d.config["ceph.osd.pg_num"] == "" { d.config["ceph.osd.pg_num"] = "32" - } else { - // Validate. - _, err := units.ParseByteSizeString(d.config["ceph.osd.pg_num"]) - if err != nil { - return err - } + } + + return nil +} + +// Create is called during pool creation and is effectively using an empty driver struct. +// WARNING: The Create() function cannot rely on any of the struct attributes being set. 
+func (d *ceph) Create() error { + revert := revert.New() + defer revert.Fail() + + d.config["volatile.initial_source"] = d.config["source"] + + err := d.FillConfig() + if err != nil { + return err + } + + // Validate. + _, err = units.ParseByteSizeString(d.config["ceph.osd.pg_num"]) + if err != nil { + return err } // Quick check. From 088964113e9c9f3c1f82b95184f56757911d5e60 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:03:38 +0000 Subject: [PATCH 053/543] lxd/storage/drivers/cephfs: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_cephfs.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/lxd/storage/drivers/driver_cephfs.go b/lxd/storage/drivers/driver_cephfs.go index ba71e5b34250..5fee0abe3dc9 100644 --- a/lxd/storage/drivers/driver_cephfs.go +++ b/lxd/storage/drivers/driver_cephfs.go @@ -87,9 +87,27 @@ func (d *cephfs) Info() Info { } } +// FillConfig populates the storage pool's configuration file with the default values. +func (d *cephfs) FillConfig() error { + if d.config["cephfs.cluster_name"] == "" { + d.config["cephfs.cluster_name"] = CephDefaultCluster + } + + if d.config["cephfs.user.name"] == "" { + d.config["cephfs.user.name"] = CephDefaultUser + } + + return nil +} + // Create is called during pool creation and is effectively using an empty driver struct. // WARNING: The Create() function cannot rely on any of the struct attributes being set. func (d *cephfs) Create() error { + err := d.FillConfig() + if err != nil { + return err + } + // Config validation. if d.config["source"] == "" { return fmt.Errorf("Missing required source name/path") @@ -99,15 +117,6 @@ func (d *cephfs) Create() error { return fmt.Errorf("cephfs.path must match the source") } - // Set default properties if missing. 
- if d.config["cephfs.cluster_name"] == "" { - d.config["cephfs.cluster_name"] = CephDefaultCluster - } - - if d.config["cephfs.user.name"] == "" { - d.config["cephfs.user.name"] = CephDefaultUser - } - d.config["cephfs.path"] = d.config["source"] // Parse the namespace / path. From 8d5c6eb43792250197fa3a811c5d1784aa9f6f75 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:04:01 +0000 Subject: [PATCH 054/543] lxd/storage/drivers/cephobject: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_cephobject.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_cephobject.go b/lxd/storage/drivers/driver_cephobject.go index a0d20d5c1bd3..c34af5a2995d 100644 --- a/lxd/storage/drivers/driver_cephobject.go +++ b/lxd/storage/drivers/driver_cephobject.go @@ -105,10 +105,8 @@ func (d *cephobject) Validate(config map[string]string) error { return d.validatePool(config, rules, nil) } -// Create is called during pool creation and is effectively using an empty driver struct. -// WARNING: The Create() function cannot rely on any of the struct attributes being set. -func (d *cephobject) Create() error { - // Set default properties if missing. +// FillConfig populates the storage pool's configuration file with the default values. +func (d *cephobject) FillConfig() error { if d.config["cephobject.cluster_name"] == "" { d.config["cephobject.cluster_name"] = CephDefaultCluster } @@ -121,6 +119,17 @@ func (d *cephobject) Create() error { return fmt.Errorf(`"cephobject.radosgw.endpoint" option is required`) } + return nil +} + +// Create is called during pool creation and is effectively using an empty driver struct. +// WARNING: The Create() function cannot rely on any of the struct attributes being set. +func (d *cephobject) Create() error { + err := d.FillConfig() + if err != nil { + return err + } + // Check if there is an existing cephobjectRadosgwAdminUser user. 
adminUserInfo, _, err := d.radosgwadminGetUser(context.TODO(), cephobjectRadosgwAdminUser) if err != nil && !api.StatusErrorCheck(err, http.StatusNotFound) { From 475e651ce7ffa807bb443955765662f0c361e94e Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:04:20 +0000 Subject: [PATCH 055/543] lxd/storage/drivers/dir: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_dir.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go index e8e1db22205b..ba0011f2d2e9 100644 --- a/lxd/storage/drivers/driver_dir.go +++ b/lxd/storage/drivers/driver_dir.go @@ -46,14 +46,24 @@ func (d *dir) Info() Info { } } -// Create is called during pool creation and is effectively using an empty driver struct. -// WARNING: The Create() function cannot rely on any of the struct attributes being set. -func (d *dir) Create() error { +// FillConfig populates the storage pool's configuration file with the default values. +func (d *dir) FillConfig() error { // Set default source if missing. if d.config["source"] == "" { d.config["source"] = GetPoolMountPath(d.name) } + return nil +} + +// Create is called during pool creation and is effectively using an empty driver struct. +// WARNING: The Create() function cannot rely on any of the struct attributes being set. 
+func (d *dir) Create() error { + err := d.FillConfig() + if err != nil { + return err + } + sourcePath := shared.HostPath(d.config["source"]) if !shared.PathExists(sourcePath) { From a3432b2e8fef64727b107e06531f3c05676008d9 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:04:27 +0000 Subject: [PATCH 056/543] lxd/storage/drivers/lvm: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_lvm.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lxd/storage/drivers/driver_lvm.go b/lxd/storage/drivers/driver_lvm.go index db9ae200b685..16a2f62cea9c 100644 --- a/lxd/storage/drivers/driver_lvm.go +++ b/lxd/storage/drivers/driver_lvm.go @@ -97,6 +97,16 @@ func (d *lvm) Info() Info { } } +// FillConfig populates the storage pool's configuration file with the default values. +func (d *lvm) FillConfig() error { + // Set default thin pool name if not specified. + if d.usesThinpool() && d.config["lvm.thinpool_name"] == "" { + d.config["lvm.thinpool_name"] = lvmThinpoolDefaultName + } + + return nil +} + // Create creates the storage pool on the storage device. func (d *lvm) Create() error { d.config["volatile.initial_source"] = d.config["source"] @@ -110,9 +120,9 @@ func (d *lvm) Create() error { revert := revert.New() defer revert.Fail() - // Set default thin pool name if not specified. 
- if d.usesThinpool() && d.config["lvm.thinpool_name"] == "" { - d.config["lvm.thinpool_name"] = lvmThinpoolDefaultName + err = d.FillConfig() + if err != nil { + return err } var usingLoopFile bool From 67324d9e7b6e27e96401f8ad0448a163e0d4fe7f Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:04:51 +0000 Subject: [PATCH 057/543] lxd/storage/drivers/zfs: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_zfs.go | 69 ++++++++++++++++++------------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs.go b/lxd/storage/drivers/driver_zfs.go index 026439cb6507..3d83672d7371 100644 --- a/lxd/storage/drivers/driver_zfs.go +++ b/lxd/storage/drivers/driver_zfs.go @@ -174,12 +174,8 @@ func (d zfs) ensureInitialDatasets(warnOnExistingPolicyApplyError bool) error { return nil } -// Create is called during pool creation and is effectively using an empty driver struct. -// WARNING: The Create() function cannot rely on any of the struct attributes being set. -func (d *zfs) Create() error { - // Store the provided source as we are likely to be mangling it. - d.config["volatile.initial_source"] = d.config["source"] - +// FillConfig populates the storage pool's configuration file with the default values. +func (d *zfs) FillConfig() error { loopPath := loopFilePath(d.name) if d.config["source"] == "" || d.config["source"] == loopPath { // Create a loop based pool. @@ -190,11 +186,6 @@ func (d *zfs) Create() error { d.config["zfs.pool_name"] = d.name } - // Validate pool_name. - if strings.Contains(d.config["zfs.pool_name"], "/") { - return fmt.Errorf("zfs.pool_name can't point to a dataset when source isn't set") - } - // Pick a default size of the loop file if not specified. 
if d.config["size"] == "" { defaultSize, err := loopFileSizeDefault() @@ -204,6 +195,44 @@ func (d *zfs) Create() error { d.config["size"] = fmt.Sprintf("%dGiB", defaultSize) } + } else if filepath.IsAbs(d.config["source"]) { + // Set default pool_name. + if d.config["zfs.pool_name"] == "" { + d.config["zfs.pool_name"] = d.name + } + + // Unset size property since it's irrelevant. + d.config["size"] = "" + } else { + // Handle an existing zpool. + if d.config["zfs.pool_name"] == "" { + d.config["zfs.pool_name"] = d.config["source"] + } + + // Unset size property since it's irrelevant. + d.config["size"] = "" + } + + return nil +} + +// Create is called during pool creation and is effectively using an empty driver struct. +// WARNING: The Create() function cannot rely on any of the struct attributes being set. +func (d *zfs) Create() error { + // Store the provided source as we are likely to be mangling it. + d.config["volatile.initial_source"] = d.config["source"] + + err := d.FillConfig() + if err != nil { + return err + } + + loopPath := loopFilePath(d.name) + if d.config["source"] == "" || d.config["source"] == loopPath { + // Validate pool_name. + if strings.Contains(d.config["zfs.pool_name"], "/") { + return fmt.Errorf("zfs.pool_name can't point to a dataset when source isn't set") + } // Create the loop file itself. size, err := units.ParseByteSizeString(d.config["size"]) @@ -235,14 +264,6 @@ func (d *zfs) Create() error { return fmt.Errorf("Custom loop file locations are not supported") } - // Unset size property since it's irrelevant. - d.config["size"] = "" - - // Set default pool_name. - if d.config["zfs.pool_name"] == "" { - d.config["zfs.pool_name"] = d.name - } - // Validate pool_name. if strings.Contains(d.config["zfs.pool_name"], "/") { return fmt.Errorf("zfs.pool_name can't point to a dataset when source isn't set") @@ -281,14 +302,6 @@ func (d *zfs) Create() error { // We don't need to keep the original source path around for import. 
d.config["source"] = d.config["zfs.pool_name"] } else { - // Handle an existing zpool. - if d.config["zfs.pool_name"] == "" { - d.config["zfs.pool_name"] = d.config["source"] - } - - // Unset size property since it's irrelevant. - d.config["size"] = "" - // Validate pool_name. if d.config["zfs.pool_name"] != d.config["source"] { return fmt.Errorf("The source must match zfs.pool_name if specified") @@ -333,7 +346,7 @@ func (d *zfs) Create() error { revert.Add(func() { _ = d.Delete(nil) }) // Apply our default configuration. - err := d.ensureInitialDatasets(false) + err = d.ensureInitialDatasets(false) if err != nil { return err } From b0a22465929e8eeddd3ba43537c0a0a01b78db9f Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:04:58 +0000 Subject: [PATCH 058/543] lxd/storage/drivers/mock: Add FillConfig function Signed-off-by: Din Music --- lxd/storage/drivers/driver_mock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lxd/storage/drivers/driver_mock.go b/lxd/storage/drivers/driver_mock.go index 76f580c254ed..b0c73fa84c5e 100644 --- a/lxd/storage/drivers/driver_mock.go +++ b/lxd/storage/drivers/driver_mock.go @@ -36,6 +36,10 @@ func (d *mock) Info() Info { } } +func (d *mock) FillConfig() error { + return nil +} + func (d *mock) Create() error { return nil } From fcf65d9d433731e6f94c9864ded94beda4859f33 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:05:56 +0000 Subject: [PATCH 059/543] lxd/storage/drivers: Add FillConfig function to the storage driver interface Signed-off-by: Din Music --- lxd/storage/drivers/interface.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/storage/drivers/interface.go b/lxd/storage/drivers/interface.go index 896b47ba4dac..e0296ab96499 100644 --- a/lxd/storage/drivers/interface.go +++ b/lxd/storage/drivers/interface.go @@ -37,6 +37,7 @@ type Driver interface { Logger() logger.Logger // Pool. 
+ FillConfig() error Create() error Delete(op *operations.Operation) error // Mount mounts a storage pool if needed, returns true if we caused a new mount, false if already mounted. From 66556736efee29c75aa60a4c6316a4bff92a102e Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 15 Jun 2023 15:11:04 +0000 Subject: [PATCH 060/543] lxd/api_internal_recover: Populate config defaults Signed-off-by: Din Music --- lxd/api_internal_recover.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lxd/api_internal_recover.go b/lxd/api_internal_recover.go index 74fdebf742b3..77bea1e9f7aa 100644 --- a/lxd/api_internal_recover.go +++ b/lxd/api_internal_recover.go @@ -177,6 +177,12 @@ func internalRecoverScan(s *state.State, userPools []api.StoragePoolsPost, valid return response.SmartError(fmt.Errorf("Failed to initialise unknown pool %q: %w", p.Name, err)) } + // Populate configuration with default values. + err := pool.Driver().FillConfig() + if err != nil { + return response.SmartError(fmt.Errorf("Failed to evaluate the default configuration values for unknown pool %q: %w", p.Name, err)) + } + err = pool.Driver().Validate(poolInfo.Config) if err != nil { return response.SmartError(fmt.Errorf("Failed config validation for unknown pool %q: %w", p.Name, err)) From 3822d4935fa76c902b08eac1795e8010e1c4d528 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 15 Jun 2023 17:06:51 -0400 Subject: [PATCH 061/543] lxc/info: Show mdev profile name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxc/info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxc/info.go b/lxc/info.go index 7cca84663cb6..0ea983131a4f 100644 --- a/lxc/info.go +++ b/lxc/info.go @@ -158,7 +158,7 @@ func (c *cmdInfo) renderGPU(gpu api.ResourcesGPUCard, prefix string, initial boo for _, k := range keys { v := gpu.Mdev[k] - fmt.Println(prefix + " - " + fmt.Sprintf(i18n.G("%s (%d available)"), 
k, v.Available)) + fmt.Println(prefix + " - " + fmt.Sprintf(i18n.G("%s (%s) (%d available)"), k, v.Name, v.Available)) if v.Description != "" { for _, line := range strings.Split(v.Description, "\n") { fmt.Printf(prefix+" %s\n", line) From 196a539f9224ab0d6f04fa4b67b52329e845dd49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 15 Jun 2023 17:07:47 -0400 Subject: [PATCH 062/543] lxd/device/gpu/mdev: Add locking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/device/gpu_mdev.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lxd/device/gpu_mdev.go b/lxd/device/gpu_mdev.go index 8ba52233aac2..54927694c92d 100644 --- a/lxd/device/gpu_mdev.go +++ b/lxd/device/gpu_mdev.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "sync" "github.com/pborman/uuid" @@ -17,6 +18,8 @@ import ( "github.com/canonical/lxd/shared/logger" ) +var gpuMdevMu sync.Mutex + type gpuMdev struct { deviceCommon } @@ -44,6 +47,10 @@ func (d *gpuMdev) Stop() (*deviceConfig.RunConfig, error) { func (d *gpuMdev) startVM() (*deviceConfig.RunConfig, error) { runConf := deviceConfig.RunConfig{} + // Lock to prevent multiple concurrent mdev devices being setup. + gpuMdevMu.Lock() + defer gpuMdevMu.Unlock() + // Get any existing UUID. 
v := d.volatileGet() mdevUUID := v["vgpu.uuid"] From a6fcb75612c2eca11bc9b961e4e710d483785a9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Thu, 15 Jun 2023 18:03:14 -0400 Subject: [PATCH 063/543] lxd/instance/qemu: Disable x-vga on mdev GPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 232058a02523..b6a093716e87 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -3970,6 +3970,11 @@ func (d *qemu) addGPUDevConfig(cfg *[]cfgSection, bus *qemuBus, gpuConfig []devi } vgaMode := func() bool { + // No VGA mode on mdev. + if vgpu != "" { + return false + } + // No VGA mode on non-x86. if d.architecture != osarch.ARCH_64BIT_INTEL_X86 { return false From 8ec50acaeb9ad0fbb0a30a90f20fa7f36b3db187 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Fri, 16 Jun 2023 13:25:44 -0400 Subject: [PATCH 064/543] lxd/bgp: Allow one hour for LXD restart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should allow for larger clusters to refresh without losing advertisements. Signed-off-by: Stéphane Graber --- lxd/bgp/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/bgp/server.go b/lxd/bgp/server.go index 25bd2f03f5d5..6fbc230e5eb6 100644 --- a/lxd/bgp/server.go +++ b/lxd/bgp/server.go @@ -427,7 +427,7 @@ func (s *Server) addPeer(address net.IP, asn uint32, password string) error { // Allow for 120s offline before route removal. GracefulRestart: &bgpAPI.GracefulRestart{ Enabled: true, - RestartTime: 120, + RestartTime: 3600, }, // Always allow for the maximum multihop. 
From 234cf29e228a73a50489b61deb27aa037766868a Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 19 Jun 2023 08:19:52 +0100 Subject: [PATCH 065/543] lxd/instance/drivers/driver/qemu: Load vhost_vsock kernel module if /dev/kvm is available Otherwise /dev/vsock may not be present and VM support detection will fail. Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_qemu.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index b6a093716e87..9d67b9dda6ab 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -7596,17 +7596,17 @@ func (d *qemu) Info() instance.Info { return data } - if !shared.PathExists("/dev/vsock") { - data.Error = fmt.Errorf("Vsock support is missing (no /dev/vsock)") - return data - } - err := util.LoadModule("vhost_vsock") if err != nil { data.Error = fmt.Errorf("vhost_vsock kernel module not loaded") return data } + if !shared.PathExists("/dev/vsock") { + data.Error = fmt.Errorf("Vsock support is missing (no /dev/vsock)") + return data + } + hostArch, err := osarch.ArchitectureGetLocalID() if err != nil { logger.Errorf("Failed getting CPU architecture during QEMU initialization: %v", err) From 17b47492ed5f4f0e32c574da22b8fb4f2a71ed0d Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 19 Jun 2023 08:54:49 +0100 Subject: [PATCH 066/543] shared/util: Use more efficient ReadDir in PathIsEmpty Signed-off-by: Thomas Parrott --- shared/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/util.go b/shared/util.go index 811b520d877f..85e46b6d18ae 100644 --- a/shared/util.go +++ b/shared/util.go @@ -85,7 +85,7 @@ func PathIsEmpty(path string) (bool, error) { defer func() { _ = f.Close() }() // read in ONLY one file - _, err = f.Readdir(1) + _, err = f.ReadDir(1) // and if the file is EOF... well, the dir is empty. 
if err == io.EOF { From 6ad542e41ccf5b5772b4989fd6630dc42b5bdb01 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:26:23 +0200 Subject: [PATCH 067/543] lxd/resources: Refactor `resources.ParseCpuset` Signed-off-by: Gabriel Mougard --- lxd/resources/cpu.go | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/lxd/resources/cpu.go b/lxd/resources/cpu.go index 486b56c9db6c..f4893a9bf83f 100644 --- a/lxd/resources/cpu.go +++ b/lxd/resources/cpu.go @@ -41,42 +41,54 @@ func GetCPUIsolated() []int64 { return isolatedCpusInt } -// ParseCpuset parses a limits.cpu range into a list of CPU ids. -func ParseCpuset(cpu string) ([]int64, error) { - cpus := []int64{} - chunks := strings.Split(cpu, ",") +// parseRangedListToInt64Slice takes an `input` of the form "1,2,8-10,5-7" and returns a slice of int64s +// containing the expanded list of numbers. In this example, the returned slice would be [1,2,8,9,10,5,6,7]. +// The elements in the output slice are meant to represent hardware entity identifiers (e.g, either CPU or NUMA node IDs). 
+func parseRangedListToInt64Slice(input string) ([]int64, error) { + res := []int64{} + chunks := strings.Split(input, ",") for _, chunk := range chunks { if strings.Contains(chunk, "-") { // Range fields := strings.SplitN(chunk, "-", 2) if len(fields) != 2 { - return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + return nil, fmt.Errorf("Invalid CPU/NUMA set value: %q", input) } low, err := strconv.ParseInt(fields[0], 10, 64) if err != nil { - return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + return nil, fmt.Errorf("Invalid CPU/NUMA set value: %w", err) } high, err := strconv.ParseInt(fields[1], 10, 64) if err != nil { - return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + return nil, fmt.Errorf("Invalid CPU/NUMA set value: %w", err) } for i := low; i <= high; i++ { - cpus = append(cpus, i) + res = append(res, i) } } else { // Simple entry nr, err := strconv.ParseInt(chunk, 10, 64) if err != nil { - return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + return nil, fmt.Errorf("Invalid CPU/NUMA set value: %w", err) } - cpus = append(cpus, nr) + res = append(res, nr) } } + return res, nil +} + +// ParseCpuset parses a `limits.cpu` range into a list of CPU ids. 
+func ParseCpuset(cpu string) ([]int64, error) { + cpus, err := parseRangedListToInt64Slice(cpu) + if err != nil { + return nil, fmt.Errorf("Invalid cpuset value %q: %w", cpu, err) + } + return cpus, nil } From 0d62db129f6780605c424cde2aeb651f8f9f1ede Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:27:37 +0200 Subject: [PATCH 068/543] lxd/resources: Add `resources.ParseNumaNodeSet` Signed-off-by: Gabriel Mougard --- lxd/resources/cpu.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lxd/resources/cpu.go b/lxd/resources/cpu.go index f4893a9bf83f..9eda0586c1d8 100644 --- a/lxd/resources/cpu.go +++ b/lxd/resources/cpu.go @@ -92,6 +92,16 @@ func ParseCpuset(cpu string) ([]int64, error) { return cpus, nil } +// ParseNumaNodeSet parses a `limits.cpu.nodes` into a list of NUMA node ids. +func ParseNumaNodeSet(numaNodeSet string) ([]int64, error) { + nodes, err := parseRangedListToInt64Slice(numaNodeSet) + if err != nil { + return nil, fmt.Errorf("Invalid NUMA node set value %q: %w", numaNodeSet, err) + } + + return nodes, nil +} + func getCPUCache(path string) ([]api.ResourcesCPUCache, error) { caches := []api.ResourcesCPUCache{} From fc3c09b0698a2ad44625223949162a2432a68cba Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:28:24 +0200 Subject: [PATCH 069/543] lxd: schedule instance on NUMA nodes if specified Signed-off-by: Gabriel Mougard --- lxd/devices.go | 148 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 136 insertions(+), 12 deletions(-) diff --git a/lxd/devices.go b/lxd/devices.go index c3ef187c666d..1e9a2135354b 100644 --- a/lxd/devices.go +++ b/lxd/devices.go @@ -314,6 +314,102 @@ func deviceNetlinkListener() (chan []string, chan []string, chan device.USBEvent return chCPU, chNetwork, chUSB, chUnix, nil } +/* + * fillFixedInstances fills the `fixedInstances` map with the instances that have been pinned to specific CPUs. 
+ * The `fixedInstances` map is a map of CPU IDs to a list of instances that have been pinned to that CPU.
+ * The `targetCpuPool` is a list of CPU IDs that are available for pinning.
+ * The `targetCpuNum` is the number of CPUs that are required for pinning.
+ * The `loadBalancing` flag indicates whether the CPU pinning should be load balanced or not (e.g., NUMA placement when `limits.cpu` is a single number which means
+ * a required number of vCPUs per instance that can be chosen within a CPU pool).
+ */
+func fillFixedInstances(fixedInstances map[int64][]instance.Instance, inst instance.Instance, effectiveCpus []int64, targetCpuPool []int64, targetCpuNum int, loadBalancing bool) {
+	if len(targetCpuPool) < targetCpuNum {
+		diffCount := len(targetCpuPool) - targetCpuNum
+		logger.Warnf("%v CPUs have been required for pinning, but %v CPUs won't be allocated", len(targetCpuPool), -diffCount)
+		targetCpuNum = len(targetCpuPool)
+	}
+
+	// If the `targetCpuPool` has been manually specified (explicit CPU IDs/ranges specified with `limits.cpu`)
+	if len(targetCpuPool) == targetCpuNum && !loadBalancing {
+		for _, nr := range targetCpuPool {
+			if !shared.Int64InSlice(nr, effectiveCpus) {
+				continue
+			}
+
+			_, ok := fixedInstances[nr]
+			if ok {
+				fixedInstances[nr] = append(fixedInstances[nr], inst)
+			} else {
+				fixedInstances[nr] = []instance.Instance{inst}
+			}
+		}
+
+		return
+	}
+
+	// If we need to load-balance the instance across the CPUs of `targetCpuPool` (e.g., NUMA placement),
+	// the heuristic is to sort the `targetCpuPool` by usage (number of instances already pinned to each CPU)
+	// and then assign the instance to the first `targetCpuNum` least used CPUs.
+ usage := map[int64]deviceTaskCPU{} + for _, id := range targetCpuPool { + cpu := deviceTaskCPU{} + cpu.id = id + cpu.strId = fmt.Sprintf("%d", id) + + count := 0 + _, ok := fixedInstances[id] + if ok { + count = len(fixedInstances[id]) + } + + cpu.count = &count + usage[id] = cpu + } + + sortedUsage := make(deviceTaskCPUs, 0) + for _, value := range usage { + sortedUsage = append(sortedUsage, value) + } + + sort.Sort(sortedUsage) + count := 0 + for _, cpu := range sortedUsage { + if count == targetCpuNum { + break + } + + id := cpu.id + _, ok := fixedInstances[id] + if ok { + fixedInstances[id] = append(fixedInstances[id], inst) + } else { + fixedInstances[id] = []instance.Instance{inst} + } + + count++ + } +} + +// deviceTaskBalance is used to balance the CPU load across containers running on a host. +// It first checks if CGroup support is available and returns if it isn't. +// It then retrieves the effective CPU list (the CPUs that are guaranteed to be online) and isolates any isolated CPUs. +// After that, it loads all instances of containers running on the node and iterates through them. +// +// For each container, it checks its CPU limits and determines whether it is pinned to specific CPUs or can use the load-balancing mechanism. +// If it is pinned, the function adds it to the fixedInstances map with the CPU numbers it is pinned to. +// If not, the container will be included in the load-balancing calculation, +// and the number of CPUs it can use is determined by taking the minimum of its assigned CPUs and the available CPUs. Note that if +// NUMA placement is enabled (`limits.cpu.nodes` is not empty), we apply a similar load-balancing logic to the `fixedInstances` map +// with a constraint being the number of vCPUs and the CPU pool being the CPUs pinned to a set of NUMA nodes. 
+// +// Next, the function balance the CPU usage by iterating over all the CPUs and dividing the containers into those that +// are pinned to a specific CPU and those that are load-balanced. For the pinned containers, +// it adds them to the pinning map with the CPU number it's pinned to. +// For the load-balanced containers, it sorts the available CPUs based on their usage count and assigns them to containers +// in ascending order until the required number of CPUs have been assigned. +// Finally, the pinning map is used to set the new CPU pinning for each container, updating it to the new balanced state. +// +// Overall, this function ensures that the CPU resources of the host are utilized effectively amongst all the containers running on it. func deviceTaskBalance(s *state.State) { min := func(x, y int) int { if x < y { @@ -375,10 +471,41 @@ func deviceTaskBalance(s *state.State) { return } + // Get CPU topology. + cpusTopology, err := resources.GetCPU() + if err != nil { + logger.Errorf("Unable to load system CPUs information: %v", err) + return + } + + // Build a map of NUMA node to CPU threads. + numaNodeToCPU := make(map[int64][]int64) + for _, cpu := range cpusTopology.Sockets { + for _, core := range cpu.Cores { + for _, thread := range core.Threads { + numaNodeToCPU[int64(thread.NUMANode)] = append(numaNodeToCPU[int64(thread.NUMANode)], thread.ID) + } + } + } + fixedInstances := map[int64][]instance.Instance{} balancedInstances := map[instance.Instance]int{} for _, c := range instances { conf := c.ExpandedConfig() + cpuNodes := conf["limits.cpu.nodes"] + var numaCpus []int64 + if cpuNodes != "" { + numaNodeSet, err := resources.ParseNumaNodeSet(cpuNodes) + if err != nil { + logger.Error("Error parsing numa node set", logger.Ctx{"numaNodes": cpuNodes, "err": err}) + return + } + + for _, numaNode := range numaNodeSet { + numaCpus = append(numaCpus, numaNodeToCPU[numaNode]...) 
+ } + } + cpulimit, ok := conf["limits.cpu"] if !ok || cpulimit == "" { cpulimit = effectiveCpus @@ -397,7 +524,11 @@ func deviceTaskBalance(s *state.State) { if err == nil { // Load-balance count = min(count, len(cpus)) - balancedInstances[c] = count + if len(numaCpus) > 0 { + fillFixedInstances(fixedInstances, c, cpus, numaCpus, count, true) + } else { + balancedInstances[c] = count + } } else { // Pinned containerCpus, err := resources.ParseCpuset(cpulimit) @@ -405,18 +536,11 @@ func deviceTaskBalance(s *state.State) { return } - for _, nr := range containerCpus { - if !shared.Int64InSlice(nr, cpus) { - continue - } - - _, ok := fixedInstances[nr] - if ok { - fixedInstances[nr] = append(fixedInstances[nr], c) - } else { - fixedInstances[nr] = []instance.Instance{c} - } + if len(numaCpus) > 0 { + logger.Warnf("The pinned CPUs: %v, override the NUMA configuration with the CPUs: %v", containerCpus, numaCpus) } + + fillFixedInstances(fixedInstances, c, cpus, containerCpus, len(containerCpus), false) } } From 815d1aa414c2d21befc7cb8913b8df1266bdab42 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:28:46 +0200 Subject: [PATCH 070/543] lxd/instance: Reschedule instance if `limits.cpu.nodes` is updated. 
Signed-off-by: Gabriel Mougard --- lxd/instance/drivers/driver_lxc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index 607a9bd3c905..e7a992f899b2 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -4540,7 +4540,7 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { if err != nil { return err } - } else if key == "limits.cpu" { + } else if key == "limits.cpu" || key == "limits.cpu.nodes" { // Trigger a scheduler re-run cgroup.TaskSchedulerTrigger("container", d.name, "changed") } else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" { From ab403c5a4fe35f806716fb88ec25d8ddfd720ad0 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:29:08 +0200 Subject: [PATCH 071/543] shared/instance: Add `limits.cpu.nodes` field to instance config keys Signed-off-by: Gabriel Mougard --- shared/instance.go | 1 + 1 file changed, 1 insertion(+) diff --git a/shared/instance.go b/shared/instance.go index abd60fe29836..480fcd23916c 100644 --- a/shared/instance.go +++ b/shared/instance.go @@ -89,6 +89,7 @@ var InstanceConfigKeysAny = map[string]func(value string) error{ "cluster.evacuate": validate.Optional(validate.IsOneOf("auto", "migrate", "live-migrate", "stop")), "limits.cpu": validate.Optional(validate.IsValidCPUSet), + "limits.cpu.nodes": validate.Optional(validate.IsValidCPUSet), "limits.disk.priority": validate.Optional(validate.IsPriority), "limits.memory": func(value string) error { if value == "" { From f827ab5d82a35c72215369be81368b5f0d821390 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:29:33 +0200 Subject: [PATCH 072/543] doc: Add `limits.cpu.nodes` Signed-off-by: Gabriel Mougard --- doc/reference/instance_options.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/reference/instance_options.md b/doc/reference/instance_options.md index 
ffbc917ac48e..8e7b2cc4b3b8 100644 --- a/doc/reference/instance_options.md +++ b/doc/reference/instance_options.md @@ -88,6 +88,7 @@ Key | Type | Default :-- | :--- | :------ | :---------- | :---------- | :---------- `limits.cpu` | string | for VMs: 1 CPU | yes | - | Number or range of CPUs to expose to the instance; see {ref}`instance-options-limits-cpu` `limits.cpu.allowance` | string | `100%` | yes | container | Controls how much of the CPU can be used: either a percentage (`50%`) for a soft limit or a chunk of time (`25ms/100ms`) for a hard limit; see {ref}`instance-options-limits-cpu-container` +`limits.cpu.nodes` | string | - | yes | - | List of comma-separated NUMA node IDs or range to place the instance CPUs on; see {ref}`instance-options-limits-cpu-container` `limits.cpu.priority` | integer | `10` (maximum) | yes | container | CPU scheduling priority compared to other instances sharing the same CPUs when overcommitting resources (integer between 0 and 10); see {ref}`instance-options-limits-cpu-container` `limits.disk.priority` | integer | `5` (medium) | yes | - | Controls how much priority to give to the instance's I/O requests when under load (integer between 0 and 10) `limits.hugepages.64KB` | string | - | yes | container | Fixed value in bytes (various suffixes supported, see {ref}`instances-limit-units`) to limit number of 64 KB huge pages; see {ref}`instance-options-limits-hugepages` @@ -167,6 +168,10 @@ All this allows for very high performance operations in the guest as the guest s It is used to calculate the scheduler priority for the instance, relative to any other instance that is using the same CPU or CPUs. For example, to limit the CPU usage of the container to one CPU when under load, set `limits.cpu.allowance` to `100%`. 
+`limits.cpu.nodes` can be used to restrict the CPUs that the instance can use to a specific set of NUMA nodes: + +- To specify which NUMA nodes to use, set `limits.cpu.nodes` to either a set of NUMA node IDs (for example, `0,1`) or a NUMA node ranges (for example, `0-1,2-4`). + `limits.cpu.priority` is another factor that is used to compute the scheduler priority score when a number of instances sharing a set of CPUs have the same percentage of CPU assigned to them. (instance-options-limits-hugepages)= From 9e097ec0a43b7e44fbf0f04db0f4d87e96f03b1c Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Mon, 19 Jun 2023 13:30:18 +0200 Subject: [PATCH 073/543] api: Add `limits.cpu.nodes` Signed-off-by: Gabriel Mougard --- doc/api-extensions.md | 7 +++++++ shared/version/api.go | 1 + 2 files changed, 8 insertions(+) diff --git a/doc/api-extensions.md b/doc/api-extensions.md index 1ac169f2e4a9..b03f77ce8df8 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -2049,3 +2049,10 @@ Add current user details to the main API endpoint. ## `instances_state_total` This extension adds a new `total` field to `InstanceStateDisk` and `InstanceStateMemory`, both part of the instance's state API. + +## `numa_cpu_placement` +This adds the possibility to place a set of CPUs in a desired set of NUMA nodes. + +This adds the following new configuration key: + +* `limits.cpu.nodes` : (string) comma-separated list of NUMA node IDs or NUMA node ID ranges to place the CPUs (chosen with a dynamic value of `limits.cpu`) in. diff --git a/shared/version/api.go b/shared/version/api.go index e616263eaef4..d9e2ad98da81 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -347,6 +347,7 @@ var APIExtensions = []string{ "migration_vm_live", "auth_user", "instances_state_total", + "numa_cpu_placement", } // APIExtensionsCount returns the number of available API extensions. 
From d23bbdcab26081bab64215bd094a1606b18777e4 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 19 Jun 2023 15:32:11 +0100 Subject: [PATCH 074/543] test: Add sleep between restarting instance and checking monitor logs in clustering_events Signed-off-by: Thomas Parrott --- test/suites/clustering.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index 979eda4eaaab..64f797894ac9 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -3475,6 +3475,7 @@ test_clustering_events() { # Restart instance generating restart lifecycle event. LXD_DIR="${LXD_ONE_DIR}" lxc restart -f c1 LXD_DIR="${LXD_THREE_DIR}" lxc restart -f c2 + sleep 2 # Check events were distributed. for i in 1 2 3; do @@ -3503,6 +3504,7 @@ test_clustering_events() { # Restart instance generating restart lifecycle event. LXD_DIR="${LXD_ONE_DIR}" lxc restart -f c1 LXD_DIR="${LXD_THREE_DIR}" lxc restart -f c2 + sleep 2 # Check events were distributed. for i in 1 2 3; do @@ -3536,6 +3538,7 @@ test_clustering_events() { # Restart instance generating restart lifecycle event. LXD_DIR="${LXD_ONE_DIR}" lxc restart -f c1 LXD_DIR="${LXD_THREE_DIR}" lxc restart -f c2 + sleep 2 # Check events were distributed. for i in 1 2 3; do @@ -3568,8 +3571,8 @@ test_clustering_events() { # Confirm that local operations are not blocked by having no event hubs running, but that events are not being # distributed. 
LXD_DIR="${LXD_ONE_DIR}" lxc restart -f c1 - sleep 1 - grep -Fc "instance-restarted" "${TEST_DIR}/node1.log" + sleep 2 + grep -Fc "instance-restarted" "${TEST_DIR}/node1.log" | grep -Fx 7 for i in 2 3; do cat "${TEST_DIR}/node${i}.log" From 59cbbf489af180a9f2aeb2acaedaf186557f53ee Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Mon, 19 Jun 2023 15:36:49 -0400 Subject: [PATCH 075/543] *: replace Seek(0, 0) by Seek(0, io.SeekStart) as the later is clearer Signed-off-by: Simon Deziel --- client/lxd_images.go | 4 ++-- lxc/export.go | 2 +- lxc/image.go | 2 +- lxd/archive/archive.go | 2 +- lxd/backup/backup_utils.go | 2 +- lxd/images.go | 12 ++++++------ lxd/instances_post.go | 4 ++-- lxd/storage/drivers/driver_btrfs_volumes.go | 2 +- lxd/storage/drivers/driver_zfs_volumes.go | 2 +- lxd/storage/drivers/generic_vfs.go | 4 ++-- lxd/storage_volumes.go | 4 ++-- shared/util.go | 2 +- 12 files changed, 21 insertions(+), 21 deletions(-) diff --git a/client/lxd_images.go b/client/lxd_images.go index 329bb87489a5..f66b13a65744 100644 --- a/client/lxd_images.go +++ b/client/lxd_images.go @@ -785,12 +785,12 @@ func (r *ProtocolLXD) CopyImage(source ImageServer, image api.Image, args *Image } // Export image - _, err = metaFile.Seek(0, 0) + _, err = metaFile.Seek(0, io.SeekStart) if err != nil { return nil, err } - _, err = rootfsFile.Seek(0, 0) + _, err = rootfsFile.Seek(0, io.SeekStart) if err != nil { return nil, err } diff --git a/lxc/export.go b/lxc/export.go index 0515cdf0aff2..6530799257d3 100644 --- a/lxc/export.go +++ b/lxc/export.go @@ -168,7 +168,7 @@ func (c *cmdExport) Run(cmd *cobra.Command, args []string) error { // Detect backup file type and rename file accordingly if len(args) <= 1 { - _, err := target.Seek(0, 0) + _, err := target.Seek(0, io.SeekStart) if err != nil { return err } diff --git a/lxc/image.go b/lxc/image.go index 19992c68ad09..5ac54d167b84 100644 --- a/lxc/image.go +++ b/lxc/image.go @@ -808,7 +808,7 @@ func (c *cmdImageImport) Run(cmd 
*cobra.Command, args []string) error { return err } - _, err = rootfs.(*os.File).Seek(0, 0) + _, err = rootfs.(*os.File).Seek(0, io.SeekStart) if err != nil { return err } diff --git a/lxd/archive/archive.go b/lxd/archive/archive.go index 5b82321586d1..1dbfa41c8ac4 100644 --- a/lxd/archive/archive.go +++ b/lxd/archive/archive.go @@ -77,7 +77,7 @@ func ExtractWithFds(cmd string, args []string, allowedCmds []string, stdin io.Re func CompressedTarReader(ctx context.Context, r io.ReadSeeker, unpacker []string, sysOS *sys.OS, outputPath string) (*tar.Reader, context.CancelFunc, error) { ctx, cancelFunc := context.WithCancel(ctx) - _, err := r.Seek(0, 0) + _, err := r.Seek(0, io.SeekStart) if err != nil { return nil, cancelFunc, err } diff --git a/lxd/backup/backup_utils.go b/lxd/backup/backup_utils.go index 32d3d1d682c9..f4e6d4d6108f 100644 --- a/lxd/backup/backup_utils.go +++ b/lxd/backup/backup_utils.go @@ -13,7 +13,7 @@ import ( // TarReader rewinds backup file handle r and returns new tar reader and process cleanup function. 
func TarReader(r io.ReadSeeker, sysOS *sys.OS, outputPath string) (*tar.Reader, context.CancelFunc, error) { - _, err := r.Seek(0, 0) + _, err := r.Seek(0, io.SeekStart) if err != nil { return nil, nil, err } diff --git a/lxd/images.go b/lxd/images.go index fa0210ebabd0..986659bd1d74 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -157,7 +157,7 @@ func compressFile(compress string, infile io.Reader, outfile io.Writer) error { return fmt.Errorf("tar2sqfs: %v (%v)", err, strings.TrimSpace(string(output))) } // Replay the result to outfile - _, err = tempfile.Seek(0, 0) + _, err = tempfile.Seek(0, io.SeekStart) if err != nil { return err } @@ -550,7 +550,7 @@ func getImgPostInfo(s *state.State, r *http.Request, builddir string, project st defer func() { _ = os.Remove(imageTarf.Name()) }() // Parse the POST data - _, err = post.Seek(0, 0) + _, err = post.Seek(0, io.SeekStart) if err != nil { return nil, err } @@ -644,7 +644,7 @@ func getImgPostInfo(s *state.State, r *http.Request, builddir string, project st return nil, err } } else { - _, err = post.Seek(0, 0) + _, err = post.Seek(0, io.SeekStart) if err != nil { return nil, err } @@ -952,7 +952,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { } // Is this a container request? 
- _, err = post.Seek(0, 0) + _, err = post.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } @@ -993,7 +993,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { if !imageUpload && shared.StringInSlice(req.Source.Type, []string{"container", "instance", "virtual-machine", "snapshot"}) { name := req.Source.Name if name != "" { - _, err = post.Seek(0, 0) + _, err = post.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } @@ -1145,7 +1145,7 @@ func getImageMetadata(fname string) (*api.ImageMetadata, string, error) { return nil, "unknown", err } - _, err = r.Seek(0, 0) + _, err = r.Seek(0, io.SeekStart) if err != nil { return nil, "", err } diff --git a/lxd/instances_post.go b/lxd/instances_post.go index cc2f5f026228..e94ff2205853 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -577,7 +577,7 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data } // Detect squashfs compression and convert to tarball. - _, err = backupFile.Seek(0, 0) + _, err = backupFile.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } @@ -614,7 +614,7 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data } // Parse the backup information. - _, err = backupFile.Seek(0, 0) + _, err = backupFile.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go index b6e3c65e217a..6ad1172a53cc 100644 --- a/lxd/storage/drivers/driver_btrfs_volumes.go +++ b/lxd/storage/drivers/driver_btrfs_volumes.go @@ -171,7 +171,7 @@ func (d *btrfs) CreateVolumeFromBackup(vol Volume, srcBackup backup.Info, srcDat revert.Add(revertHook) // Find the compression algorithm used for backup source data. 
- _, err = srcData.Seek(0, 0) + _, err = srcData.Seek(0, io.SeekStart) if err != nil { return nil, nil, err } diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 499e0139e8d9..8019fffad05f 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -421,7 +421,7 @@ func (d *zfs) CreateVolumeFromBackup(vol Volume, srcBackup backup.Info, srcData for _, v := range vols { // Find the compression algorithm used for backup source data. - _, err := srcData.Seek(0, 0) + _, err := srcData.Seek(0, io.SeekStart) if err != nil { return nil, nil, err } diff --git a/lxd/storage/drivers/generic_vfs.go b/lxd/storage/drivers/generic_vfs.go index f275d10f2f2f..9f8d3a1885fb 100644 --- a/lxd/storage/drivers/generic_vfs.go +++ b/lxd/storage/drivers/generic_vfs.go @@ -704,7 +704,7 @@ func genericVFSBackupUnpack(d Driver, sysOS *sys.OS, vol Volume, snapshots []str // Extract filesystem volume. d.Logger().Debug(fmt.Sprintf("Unpacking %s filesystem volume", volTypeName), logger.Ctx{"source": srcPrefix, "target": mountPath, "args": fmt.Sprintf("%+v", args)}) - _, err := srcData.Seek(0, 0) + _, err := srcData.Seek(0, io.SeekStart) if err != nil { return err } @@ -801,7 +801,7 @@ func genericVFSBackupUnpack(d Driver, sysOS *sys.OS, vol Volume, snapshots []str defer revert.Fail() // Find the compression algorithm used for backup source data. - _, err := srcData.Seek(0, 0) + _, err := srcData.Seek(0, io.SeekStart) if err != nil { return nil, nil, err } diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go index ce564140b7db..c9bc3b7e4cb7 100644 --- a/lxd/storage_volumes.go +++ b/lxd/storage_volumes.go @@ -1868,7 +1868,7 @@ func createStoragePoolVolumeFromBackup(s *state.State, r *http.Request, requestP } // Detect squashfs compression and convert to tarball. 
- _, err = backupFile.Seek(0, 0) + _, err = backupFile.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } @@ -1905,7 +1905,7 @@ func createStoragePoolVolumeFromBackup(s *state.State, r *http.Request, requestP } // Parse the backup information. - _, err = backupFile.Seek(0, 0) + _, err = backupFile.Seek(0, io.SeekStart) if err != nil { return response.InternalError(err) } diff --git a/shared/util.go b/shared/util.go index 85e46b6d18ae..8a9a7fdd4efb 100644 --- a/shared/util.go +++ b/shared/util.go @@ -1107,7 +1107,7 @@ func SetProgressMetadata(metadata map[string]any, stage, displayPrefix string, p func DownloadFileHash(ctx context.Context, httpClient *http.Client, useragent string, progress func(progress ioprogress.ProgressData), canceler *cancel.HTTPRequestCanceller, filename string, url string, hash string, hashFunc hash.Hash, target io.WriteSeeker) (int64, error) { // Always seek to the beginning - _, _ = target.Seek(0, 0) + _, _ = target.Seek(0, io.SeekStart) var req *http.Request var err error From 0cd55f444cefc6021d5afab592fb9c60da3dc30f Mon Sep 17 00:00:00 2001 From: Ruth Fuchss Date: Wed, 21 Jun 2023 14:15:53 +0200 Subject: [PATCH 076/543] doc: clean up doc about NUMA node IDs Signed-off-by: Ruth Fuchss --- doc/reference/instance_options.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/reference/instance_options.md b/doc/reference/instance_options.md index 8e7b2cc4b3b8..1b47488d6fe3 100644 --- a/doc/reference/instance_options.md +++ b/doc/reference/instance_options.md @@ -88,7 +88,7 @@ Key | Type | Default :-- | :--- | :------ | :---------- | :---------- | :---------- `limits.cpu` | string | for VMs: 1 CPU | yes | - | Number or range of CPUs to expose to the instance; see {ref}`instance-options-limits-cpu` `limits.cpu.allowance` | string | `100%` | yes | container | Controls how much of the CPU can be used: either a percentage (`50%`) for a soft limit or a chunk of time (`25ms/100ms`) for a hard limit; 
see {ref}`instance-options-limits-cpu-container` -`limits.cpu.nodes` | string | - | yes | - | List of comma-separated NUMA node IDs or range to place the instance CPUs on; see {ref}`instance-options-limits-cpu-container` +`limits.cpu.nodes` | string | - | yes | - | Comma-separated list of NUMA node IDs or ranges to place the instance CPUs on; see {ref}`instance-options-limits-cpu-container` `limits.cpu.priority` | integer | `10` (maximum) | yes | container | CPU scheduling priority compared to other instances sharing the same CPUs when overcommitting resources (integer between 0 and 10); see {ref}`instance-options-limits-cpu-container` `limits.disk.priority` | integer | `5` (medium) | yes | - | Controls how much priority to give to the instance's I/O requests when under load (integer between 0 and 10) `limits.hugepages.64KB` | string | - | yes | container | Fixed value in bytes (various suffixes supported, see {ref}`instances-limit-units`) to limit number of 64 KB huge pages; see {ref}`instance-options-limits-hugepages` @@ -168,9 +168,8 @@ All this allows for very high performance operations in the guest as the guest s It is used to calculate the scheduler priority for the instance, relative to any other instance that is using the same CPU or CPUs. For example, to limit the CPU usage of the container to one CPU when under load, set `limits.cpu.allowance` to `100%`. -`limits.cpu.nodes` can be used to restrict the CPUs that the instance can use to a specific set of NUMA nodes: - -- To specify which NUMA nodes to use, set `limits.cpu.nodes` to either a set of NUMA node IDs (for example, `0,1`) or a NUMA node ranges (for example, `0-1,2-4`). +`limits.cpu.nodes` can be used to restrict the CPUs that the instance can use to a specific set of NUMA nodes. +To specify which NUMA nodes to use, set `limits.cpu.nodes` to either a set of NUMA node IDs (for example, `0,1`) or a set of NUMA node ranges (for example, `0-1,2-4`). 
`limits.cpu.priority` is another factor that is used to compute the scheduler priority score when a number of instances sharing a set of CPUs have the same percentage of CPU assigned to them. From 37d3a0cf4a26458caefe7b392ede6a74503dc16a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Wed, 21 Jun 2023 10:15:58 -0400 Subject: [PATCH 077/543] lxd/instance/qemu: Fix vsock id type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_qemu.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 9d67b9dda6ab..81e562c52fda 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -7260,14 +7260,7 @@ func (d *qemu) vsockID() int { // unique, non-clashing context ID for our guest. info := DriverStatuses()[instancetype.VM].Info - feature, found := info.Features["vhost_vsock"] - - vsockID, ok := feature.(int) - if !found || !ok { - vsockID = vsock.Host - } - - return vsockID + 1 + d.id + return info.Features["vhost_vsock"].(int) + 1 + d.id } // InitPID returns the instance's current process ID. 
@@ -7783,7 +7776,7 @@ func (d *qemu) checkFeatures(hostArch int, qemuPath string) (map[string]any, err // Fallback to the default ID for a host system features["vhost_vsock"] = vsock.Host } else { - features["vhost_vsock"] = vsockID + features["vhost_vsock"] = int(vsockID) } return features, nil From db2447c61f86497fc4dec0ec0ed1c5df69217e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Wed, 21 Jun 2023 10:58:36 -0400 Subject: [PATCH 078/543] lxd/instance/lxc: Fix live cgroup updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/instance/drivers/driver_lxc.go | 46 ++++++++++++++---------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index e7a992f899b2..ed7524f34377 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -667,7 +667,7 @@ func (d *lxc) initLXC(config bool) (*liblxc.Container, error) { }) // Load cgroup abstraction - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, false) if err != nil { return nil, err } @@ -1485,7 +1485,7 @@ func (d *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error { return err } - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return err } @@ -2668,7 +2668,7 @@ func (d *lxc) Stop(stateful bool) error { } // Load cgroup abstraction - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { op.Done(err) return err @@ -3049,7 +3049,7 @@ func (d *lxc) Freeze() error { return err } - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return err } @@ -3099,7 +3099,7 @@ func (d *lxc) Unfreeze() error { return err } - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return err } @@ -4307,7 +4307,7 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { return err } - cg, err := 
d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return err } @@ -7052,7 +7052,7 @@ func (d *lxc) cpuState() api.InstanceStateCPU { } // CPU usage in seconds - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return cpu } @@ -7130,7 +7130,7 @@ func (d *lxc) memoryState() api.InstanceStateMemory { return memory } - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return memory } @@ -7254,7 +7254,7 @@ func (d *lxc) processesState(pid int) (int64, error) { return -1, err } - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return 0, err } @@ -7876,7 +7876,7 @@ func (d *lxc) setNetworkPriority() error { } // Load the cgroup struct. - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return err } @@ -8095,20 +8095,18 @@ func (d *lxc) CGroup() (*cgroup.CGroup, error) { return nil, err } - return d.cgroup(cc) + return d.cgroup(cc, true) } -func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) { - rw := lxcCgroupReadWriter{} - if cc != nil { - rw.cc = cc - rw.conf = true - } - - if rw.cc == nil { +func (d *lxc) cgroup(cc *liblxc.Container, running bool) (*cgroup.CGroup, error) { + if cc == nil { return nil, fmt.Errorf("Container not initialized for cgroup") } + rw := lxcCgroupReadWriter{} + rw.cc = cc + rw.running = running + cg, err := cgroup.New(&rw) if err != nil { return nil, err @@ -8119,12 +8117,12 @@ func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) { } type lxcCgroupReadWriter struct { - cc *liblxc.Container - conf bool + cc *liblxc.Container + running bool } func (rw *lxcCgroupReadWriter) Get(version cgroup.Backend, controller string, key string) (string, error) { - if rw.conf { + if !rw.running { lxcKey := fmt.Sprintf("lxc.cgroup.%s", key) if version == cgroup.V2 { @@ -8138,7 +8136,7 @@ func (rw *lxcCgroupReadWriter) Get(version cgroup.Backend, controller string, ke } func (rw *lxcCgroupReadWriter) Set(version cgroup.Backend, 
controller string, key string, value string) error { - if rw.conf { + if !rw.running { if version == cgroup.V1 { return lxcSetConfigItem(rw.cc, fmt.Sprintf("lxc.cgroup.%s", key), value) } @@ -8182,7 +8180,7 @@ func (d *lxc) Metrics(hostInterfaces []net.Interface) (*metrics.MetricSet, error } // Load cgroup abstraction - cg, err := d.cgroup(cc) + cg, err := d.cgroup(cc, true) if err != nil { return nil, err } From 1a244b45ef861d69421120e59a6d86f18e063d10 Mon Sep 17 00:00:00 2001 From: Max Asnaashari Date: Tue, 20 Jun 2023 23:46:45 +0000 Subject: [PATCH 079/543] lxd/filter: Support custom filter operators This extends functionality of the filter logic to support configurable operators, as well as new integer comparisons, and custom parsers to format values. Signed-off-by: Max Asnaashari --- lxd/filter/clause.go | 59 +++++++---- lxd/filter/clause_test.go | 10 +- lxd/filter/match.go | 205 ++++++++++++++++++++++++++++++++------ lxd/filter/match_test.go | 10 +- lxd/filter/operator.go | 34 +++++++ 5 files changed, 261 insertions(+), 57 deletions(-) create mode 100644 lxd/filter/operator.go diff --git a/lxd/filter/clause.go b/lxd/filter/clause.go index 9d5cbeccc698..bd79a6d360b4 100644 --- a/lxd/filter/clause.go +++ b/lxd/filter/clause.go @@ -2,6 +2,7 @@ package filter import ( "fmt" + "regexp" "strings" "github.com/canonical/lxd/shared" @@ -16,19 +17,36 @@ type Clause struct { Value string } +// ClauseSet is a set of clauses. There are configurable functions that can be used to +// perform unique parsing of the clauses. +type ClauseSet struct { + Clauses []Clause + Ops OperatorSet + + ParseInt func(Clause) (int64, error) + ParseUint func(Clause) (uint64, error) + ParseString func(Clause) (string, error) + ParseBool func(Clause) (bool, error) + ParseRegexp func(Clause) (*regexp.Regexp, error) +} + // Parse a user-provided filter string. 
-func Parse(s string) ([]Clause, error) { +func Parse(s string, op OperatorSet) (*ClauseSet, error) { + if !op.isValid() { + return nil, fmt.Errorf("Invalid operator set") + } + clauses := []Clause{} parts := strings.Fields(s) index := 0 - prevLogical := "and" + prevLogical := op.And for index < len(parts) { clause := Clause{} - if strings.EqualFold(parts[index], "not") { + if strings.EqualFold(parts[index], op.Negate) { clause.Not = true index++ if index == len(parts) { @@ -55,23 +73,25 @@ func Parse(s string) ([]Clause, error) { value := parts[index] // support strings with spaces that are quoted - if strings.HasPrefix(value, "\"") { - value = value[1:] - for { - index++ - if index == len(parts) { - return nil, fmt.Errorf("unterminated quote") + for _, symbol := range op.Quote { + if strings.HasPrefix(value, symbol) { + value = value[1:] + for { + index++ + if index == len(parts) { + return nil, fmt.Errorf("unterminated quote") + } + + if strings.HasSuffix(parts[index], symbol) { + break + } + + value += " " + parts[index] } - if strings.HasSuffix(parts[index], "\"") { - break - } - - value += " " + parts[index] + end := parts[index] + value += " " + end[0:len(end)-1] } - - end := parts[index] - value += " " + end[0:len(end)-1] } clause.Value = value @@ -80,7 +100,7 @@ func Parse(s string) ([]Clause, error) { clause.PrevLogical = prevLogical if index < len(parts) { prevLogical = parts[index] - if !shared.StringInSlice(prevLogical, []string{"and", "or"}) { + if !shared.StringInSlice(prevLogical, []string{op.And, op.Or}) { return nil, fmt.Errorf("invalid clause composition") } @@ -89,8 +109,9 @@ func Parse(s string) ([]Clause, error) { return nil, fmt.Errorf("unterminated compound clause") } } + clauses = append(clauses, clause) } - return clauses, nil + return &ClauseSet{Clauses: clauses, Ops: op}, nil } diff --git a/lxd/filter/clause_test.go b/lxd/filter/clause_test.go index 20174f5ef972..bd8f18f7404c 100644 --- a/lxd/filter/clause_test.go +++ 
b/lxd/filter/clause_test.go @@ -23,7 +23,7 @@ func TestParse_Error(t *testing.T) { for s, message := range cases { t.Run(s, func(t *testing.T) { - clauses, err := filter.Parse(s) + clauses, err := filter.Parse(s, filter.QueryOperatorSet()) assert.Nil(t, clauses) assert.EqualError(t, err, message) }) @@ -31,11 +31,11 @@ func TestParse_Error(t *testing.T) { } func TestParse(t *testing.T) { - clauses, err := filter.Parse("foo eq \"bar egg\" or not baz eq yuk") + clauses, err := filter.Parse("foo eq \"bar egg\" or not baz eq yuk", filter.QueryOperatorSet()) require.NoError(t, err) - assert.Len(t, clauses, 2) - clause1 := clauses[0] - clause2 := clauses[1] + assert.Len(t, clauses.Clauses, 2) + clause1 := clauses.Clauses[0] + clause2 := clauses.Clauses[1] assert.False(t, clause1.Not) assert.Equal(t, "and", clause1.PrevLogical) assert.Equal(t, "foo", clause1.Field) diff --git a/lxd/filter/match.go b/lxd/filter/match.go index 5fc42a3cb373..71d961c438fd 100644 --- a/lxd/filter/match.go +++ b/lxd/filter/match.go @@ -1,40 +1,42 @@ package filter import ( + "fmt" "reflect" "regexp" + "strconv" "strings" ) // Match returns true if the given object matches the given filter. 
-func Match(obj any, clauses []Clause) bool { +func Match(obj any, set ClauseSet) (bool, error) { + if set.ParseInt == nil { + set.ParseInt = DefaultParseInt + } + + if set.ParseUint == nil { + set.ParseUint = DefaultParseUint + } + + if set.ParseString == nil { + set.ParseString = DefaultParseString + } + + if set.ParseBool == nil { + set.ParseBool = DefaultParseBool + } + + if set.ParseRegexp == nil { + set.ParseRegexp = DefaultParseRegexp + } + match := true - for _, clause := range clauses { + for _, clause := range set.Clauses { value := ValueOf(obj, clause.Field) - var clauseMatch bool - - // If 'value' is type of string try to test value as a regexp - // Comparison is case insensitive - if reflect.ValueOf(value).Kind() == reflect.String { - regexpValue := clause.Value - if !(strings.Contains(regexpValue, "^") || strings.Contains(regexpValue, "$")) { - regexpValue = "^" + regexpValue + "$" - } - - r, err := regexp.Compile("(?i)" + regexpValue) - // If not regexp compatible use original value. - if err != nil { - clauseMatch = strings.EqualFold(value.(string), clause.Value) - } else { - clauseMatch = r.MatchString(value.(string)) - } - } else { - clauseMatch = value == clause.Value - } - - if clause.Operator == "ne" { - clauseMatch = !clauseMatch + clauseMatch, err := set.match(clause, value) + if err != nil { + return false, err } // Finish out logic @@ -43,14 +45,159 @@ func Match(obj any, clauses []Clause) bool { } switch clause.PrevLogical { - case "and": + case set.Ops.And: match = match && clauseMatch - case "or": + case set.Ops.Or: match = match || clauseMatch default: - panic("unexpected clause operator") + return false, fmt.Errorf("unexpected clause operator") } } - return match + return match, nil +} + +// DefaultParseInt converts the value of the clause to int64. +func DefaultParseInt(c Clause) (int64, error) { + return strconv.ParseInt(c.Value, 10, 0) +} + +// DefaultParseUint converts the value of the clause to Uint64. 
+func DefaultParseUint(c Clause) (uint64, error) { + return strconv.ParseUint(c.Value, 10, 0) +} + +// DefaultParseString converts the value of the clause to string. +func DefaultParseString(c Clause) (string, error) { + return c.Value, nil +} + +// DefaultParseBool converts the value of the clause to boolean. +func DefaultParseBool(c Clause) (bool, error) { + return strconv.ParseBool(c.Value) +} + +// DefaultParseRegexp converts the value of the clause to regexp. +func DefaultParseRegexp(c Clause) (*regexp.Regexp, error) { + regexpValue := c.Value + if !(strings.Contains(regexpValue, "^") || strings.Contains(regexpValue, "$")) { + regexpValue = "^" + regexpValue + "$" + } + + return regexp.Compile("(?i)" + regexpValue) +} + +func (s ClauseSet) match(c Clause, objValue any) (bool, error) { + var valueStr string + var valueRegexp *regexp.Regexp + var valueInt int64 + var valueUint uint64 + var valueBool bool + var err error + + // If 'value' is type of string try to test value as a regexp. + valInfo := reflect.ValueOf(objValue) + kind := valInfo.Kind() + switch kind { + case reflect.String: + valueRegexp, _ = s.ParseRegexp(c) + + if valueRegexp == nil { + valueStr, err = s.ParseString(c) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + valueInt, err = s.ParseInt(c) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + valueUint, err = s.ParseUint(c) + case reflect.Bool: + valueBool, err = s.ParseBool(c) + default: + return false, fmt.Errorf("Invalid type %q for field %q", kind.String(), c.Field) + } + + if err != nil { + return false, fmt.Errorf("Failed to parse value: %w", err) + } + + switch c.Operator { + case s.Ops.Equals: + if valueRegexp != nil { + return valueRegexp.MatchString(objValue.(string)), nil + } + + switch val := objValue.(type) { + case string: + // Comparison is case insensitive. 
+ return strings.EqualFold(val, valueStr), nil + case int, int8, int16, int32, int64: + return objValue == valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return objValue == valueUint, nil + case bool: + return objValue == valueBool, nil + } + + case s.Ops.NotEquals: + if valueRegexp != nil { + return !valueRegexp.MatchString(objValue.(string)), nil + } + + switch val := objValue.(type) { + case string: + // Comparison is case insensitive. + return !strings.EqualFold(val, valueStr), nil + case int, int8, int16, int32, int64: + return objValue != valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return objValue != valueUint, nil + case bool: + return objValue != valueBool, nil + } + + case s.Ops.GreaterThan: + switch objValue.(type) { + case string, bool: + return false, fmt.Errorf("Invalid operator %q for field %q", c.Operator, c.Field) + case int, int8, int16, int32, int64: + return valInfo.Int() > valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return valInfo.Uint() > valueUint, nil + } + + case s.Ops.LessThan: + switch objValue.(type) { + case string, bool: + return false, fmt.Errorf("Invalid operator %q for field %q", c.Operator, c.Field) + case int, int8, int16, int32, int64: + return valInfo.Int() < valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return valInfo.Uint() < valueUint, nil + } + + case s.Ops.GreaterEqual: + switch objValue.(type) { + case string, bool: + return false, fmt.Errorf("Invalid operator %q for field %q", c.Operator, c.Field) + case int, int8, int16, int32, int64: + return valInfo.Int() >= valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return valInfo.Uint() >= valueUint, nil + } + + case s.Ops.LessEqual: + switch objValue.(type) { + case string, bool: + return false, fmt.Errorf("Invalid operator %q for field %q", c.Operator, c.Field) + case int, int8, int16, int32, int64: + return valInfo.Int() <= valueInt, nil + case uint, uint8, uint16, uint32, uint64: + return valInfo.Uint() <= 
valueUint, nil + } + + default: + return false, fmt.Errorf("Unsupported operation") + } + + return false, fmt.Errorf("Unsupported filter type %q for field %q", kind.String(), c.Field) } diff --git a/lxd/filter/match_test.go b/lxd/filter/match_test.go index f82c561281ce..dcd00c031490 100644 --- a/lxd/filter/match_test.go +++ b/lxd/filter/match_test.go @@ -46,9 +46,10 @@ func TestMatch_Instance(t *testing.T) { for s := range cases { t.Run(s, func(t *testing.T) { - f, err := filter.Parse(s) + f, err := filter.Parse(s, filter.QueryOperatorSet()) + require.NoError(t, err) + match, err := filter.Match(instance, *f) require.NoError(t, err) - match := filter.Match(instance, f) assert.Equal(t, cases[s], match) }) } @@ -72,9 +73,10 @@ func TestMatch_Image(t *testing.T) { for s := range cases { t.Run(s, func(t *testing.T) { - f, err := filter.Parse(s) + f, err := filter.Parse(s, filter.QueryOperatorSet()) + require.NoError(t, err) + match, err := filter.Match(image, *f) require.NoError(t, err) - match := filter.Match(image, f) assert.Equal(t, cases[s], match) }) } diff --git a/lxd/filter/operator.go b/lxd/filter/operator.go new file mode 100644 index 000000000000..833bf4f77bc8 --- /dev/null +++ b/lxd/filter/operator.go @@ -0,0 +1,34 @@ +package filter + +// OperatorSet represents the types of operators and symbols that a filter can support. +type OperatorSet struct { + And string + Or string + Equals string + NotEquals string + + GreaterThan string + LessThan string + GreaterEqual string + LessEqual string + + Negate string + Quote []string +} + +// isValid ensures the OperatorSet has valid fields for the minimum supported operators. +func (o *OperatorSet) isValid() bool { + return o.And != "" && o.Or != "" && o.Equals != "" && o.NotEquals != "" && o.Negate != "" && len(o.Quote) > 0 +} + +// QueryOperatorSet returns the default operator set for LXD API queries. 
+func QueryOperatorSet() OperatorSet { + return OperatorSet{ + And: "and", + Or: "or", + Equals: "eq", + NotEquals: "ne", + Negate: "not", + Quote: []string{"\""}, + } +} From ffd3836100922660cec6cdd5515ef62b984d3fe9 Mon Sep 17 00:00:00 2001 From: Max Asnaashari Date: Tue, 20 Jun 2023 23:51:10 +0000 Subject: [PATCH 080/543] shared/filter: Move filter to shared package This will facilitate using the filter package in LXD related projects. Signed-off-by: Max Asnaashari --- {lxd => shared}/filter/clause.go | 0 {lxd => shared}/filter/clause_test.go | 0 {lxd => shared}/filter/doc.go | 0 {lxd => shared}/filter/match.go | 0 {lxd => shared}/filter/match_test.go | 0 {lxd => shared}/filter/operator.go | 0 {lxd => shared}/filter/value.go | 0 {lxd => shared}/filter/value_test.go | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename {lxd => shared}/filter/clause.go (100%) rename {lxd => shared}/filter/clause_test.go (100%) rename {lxd => shared}/filter/doc.go (100%) rename {lxd => shared}/filter/match.go (100%) rename {lxd => shared}/filter/match_test.go (100%) rename {lxd => shared}/filter/operator.go (100%) rename {lxd => shared}/filter/value.go (100%) rename {lxd => shared}/filter/value_test.go (100%) diff --git a/lxd/filter/clause.go b/shared/filter/clause.go similarity index 100% rename from lxd/filter/clause.go rename to shared/filter/clause.go diff --git a/lxd/filter/clause_test.go b/shared/filter/clause_test.go similarity index 100% rename from lxd/filter/clause_test.go rename to shared/filter/clause_test.go diff --git a/lxd/filter/doc.go b/shared/filter/doc.go similarity index 100% rename from lxd/filter/doc.go rename to shared/filter/doc.go diff --git a/lxd/filter/match.go b/shared/filter/match.go similarity index 100% rename from lxd/filter/match.go rename to shared/filter/match.go diff --git a/lxd/filter/match_test.go b/shared/filter/match_test.go similarity index 100% rename from lxd/filter/match_test.go rename to shared/filter/match_test.go diff --git 
a/lxd/filter/operator.go b/shared/filter/operator.go similarity index 100% rename from lxd/filter/operator.go rename to shared/filter/operator.go diff --git a/lxd/filter/value.go b/shared/filter/value.go similarity index 100% rename from lxd/filter/value.go rename to shared/filter/value.go diff --git a/lxd/filter/value_test.go b/shared/filter/value_test.go similarity index 100% rename from lxd/filter/value_test.go rename to shared/filter/value_test.go From 2fec82ca7f89db9b0aa50cfbd7c956ae3b743fe2 Mon Sep 17 00:00:00 2001 From: Max Asnaashari Date: Tue, 20 Jun 2023 23:54:28 +0000 Subject: [PATCH 081/543] lxd: update filter usages Signed-off-by: Max Asnaashari --- lxd/images.go | 25 ++++++++++++---------- lxd/instance/filter.go | 13 ++++++++---- lxd/instances_get.go | 16 +++++++-------- lxd/storage_volumes.go | 28 ++++++++++++++----------- lxd/warnings.go | 40 ++++++++++++++++++++++++------------ shared/filter/clause_test.go | 2 +- shared/filter/match_test.go | 2 +- shared/filter/value_test.go | 2 +- 8 files changed, 77 insertions(+), 51 deletions(-) diff --git a/lxd/images.go b/lxd/images.go index 986659bd1d74..961ee0253f87 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -32,7 +32,6 @@ import ( "github.com/canonical/lxd/lxd/db" dbCluster "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/db/operationtype" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/lxd/instance" "github.com/canonical/lxd/lxd/instance/instancetype" "github.com/canonical/lxd/lxd/lifecycle" @@ -47,6 +46,7 @@ import ( "github.com/canonical/lxd/lxd/util" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" "github.com/canonical/lxd/shared/ioprogress" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/osarch" @@ -1241,7 +1241,7 @@ func getImageMetadata(fname string) (*api.ImageMetadata, string, error) { return &result, imageType, nil } -func doImagesGet(ctx context.Context, tx 
*db.ClusterTx, recursion bool, projectName string, public bool, clauses []filter.Clause) (any, error) { +func doImagesGet(ctx context.Context, tx *db.ClusterTx, recursion bool, projectName string, public bool, clauses *filter.ClauseSet) (any, error) { mustLoadObjects := recursion || clauses != nil fingerprints, err := tx.GetImagesFingerprints(ctx, projectName, public) @@ -1267,8 +1267,15 @@ func doImagesGet(ctx context.Context, tx *db.ClusterTx, recursion bool, projectN continue } - if clauses != nil && !filter.Match(*image, clauses) { - continue + if clauses != nil { + match, err := filter.Match(*image, *clauses) + if err != nil { + return nil, err + } + + if !match { + continue + } } if recursion { @@ -1496,13 +1503,9 @@ func imagesGet(d *Daemon, r *http.Request) response.Response { filterStr := r.FormValue("filter") public := d.checkTrustedClient(r) != nil || allowProjectPermission("images", "view")(d, r) != response.EmptySyncResponse - var err error - var clauses []filter.Clause - if filterStr != "" { - clauses, err = filter.Parse(filterStr) - if err != nil { - return response.SmartError(fmt.Errorf("Invalid filter: %w", err)) - } + clauses, err := filter.Parse(filterStr, filter.QueryOperatorSet()) + if err != nil { + return response.SmartError(fmt.Errorf("Invalid filter: %w", err)) } var result any diff --git a/lxd/instance/filter.go b/lxd/instance/filter.go index 1f89e6d2d199..ea1b43a6efae 100644 --- a/lxd/instance/filter.go +++ b/lxd/instance/filter.go @@ -1,20 +1,25 @@ package instance import ( - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" ) // FilterFull returns a filtered list of full instances that match the given clauses. 
-func FilterFull(instances []*api.InstanceFull, clauses []filter.Clause) []*api.InstanceFull { +func FilterFull(instances []*api.InstanceFull, clauses filter.ClauseSet) ([]*api.InstanceFull, error) { filtered := []*api.InstanceFull{} for _, instance := range instances { - if !filter.Match(*instance, clauses) { + match, err := filter.Match(*instance, clauses) + if err != nil { + return nil, err + } + + if !match { continue } filtered = append(filtered, instance) } - return filtered + return filtered, nil } diff --git a/lxd/instances_get.go b/lxd/instances_get.go index e5902940c490..b1c6ab513926 100644 --- a/lxd/instances_get.go +++ b/lxd/instances_get.go @@ -17,7 +17,6 @@ import ( "github.com/canonical/lxd/lxd/db" dbCluster "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/db/query" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/lxd/instance" "github.com/canonical/lxd/lxd/instance/instancetype" "github.com/canonical/lxd/lxd/project" @@ -26,6 +25,7 @@ import ( "github.com/canonical/lxd/lxd/state" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/version" ) @@ -258,12 +258,9 @@ func doInstancesGet(s *state.State, r *http.Request) (any, error) { // Parse filter value. filterStr := r.FormValue("filter") - var clauses []filter.Clause - if filterStr != "" { - clauses, err = filter.Parse(filterStr) - if err != nil { - return nil, fmt.Errorf("Invalid filter: %w", err) - } + clauses, err := filter.Parse(filterStr, filter.QueryOperatorSet()) + if err != nil { + return nil, fmt.Errorf("Invalid filter: %w", err) } mustLoadObjects := recursion > 0 || (recursion == 0 && clauses != nil) @@ -489,7 +486,10 @@ func doInstancesGet(s *state.State, r *http.Request) (any, error) { // Filter result list if needed. 
if clauses != nil { - resultFullList = instance.FilterFull(resultFullList, clauses) + resultFullList, err = instance.FilterFull(resultFullList, *clauses) + if err != nil { + return nil, err + } } if recursion == 0 { diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go index c9bc3b7e4cb7..49f630f17164 100644 --- a/lxd/storage_volumes.go +++ b/lxd/storage_volumes.go @@ -25,7 +25,6 @@ import ( "github.com/canonical/lxd/lxd/db" "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/db/operationtype" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/lxd/instance" "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/lxd/project" @@ -37,6 +36,7 @@ import ( "github.com/canonical/lxd/lxd/util" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/version" ) @@ -314,13 +314,9 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response { } filterStr := r.FormValue("filter") - var clauses []filter.Clause - if filterStr != "" { - var err error - clauses, err = filter.Parse(filterStr) - if err != nil { - return response.SmartError(fmt.Errorf("Invalid filter: %w", err)) - } + clauses, err := filter.Parse(filterStr, filter.QueryOperatorSet()) + if err != nil { + return response.SmartError(fmt.Errorf("Invalid filter: %w", err)) } // Retrieve ID of the storage pool (and check if the storage pool exists). @@ -419,7 +415,10 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response { return response.SmartError(err) } - dbVolumes = filterVolumes(dbVolumes, clauses, allProjects, projectImages) + dbVolumes, err = filterVolumes(dbVolumes, clauses, allProjects, projectImages) + if err != nil { + return response.SmartError(err) + } // Sort by type then volume name. 
sort.SliceStable(dbVolumes, func(i, j int) bool { @@ -459,7 +458,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response { } // filterVolumes returns a filtered list of volumes that match the given clauses. -func filterVolumes(volumes []*db.StorageVolume, clauses []filter.Clause, allProjects bool, filterProjectImages []string) []*db.StorageVolume { +func filterVolumes(volumes []*db.StorageVolume, clauses *filter.ClauseSet, allProjects bool, filterProjectImages []string) ([]*db.StorageVolume, error) { // FilterStorageVolume is for filtering purpose only. // It allows to filter snapshots by using default filter mechanism. type FilterStorageVolume struct { @@ -479,14 +478,19 @@ func filterVolumes(volumes []*db.StorageVolume, clauses []filter.Clause, allProj Snapshot: strconv.FormatBool(strings.Contains(volume.Name, shared.SnapshotDelimiter)), } - if !filter.Match(tmpVolume, clauses) { + match, err := filter.Match(tmpVolume, *clauses) + if err != nil { + return nil, err + } + + if !match { continue } filtered = append(filtered, volume) } - return filtered + return filtered, nil } // swagger:operation POST /1.0/storage-pools/{poolName}/volumes/{type} storage storage_pool_volumes_type_post diff --git a/lxd/warnings.go b/lxd/warnings.go index 23ba0f72894e..fda5072f2fe0 100644 --- a/lxd/warnings.go +++ b/lxd/warnings.go @@ -16,7 +16,6 @@ import ( "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/db/operationtype" "github.com/canonical/lxd/lxd/db/warningtype" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/lxd/lifecycle" "github.com/canonical/lxd/lxd/operations" "github.com/canonical/lxd/lxd/project" @@ -25,6 +24,7 @@ import ( "github.com/canonical/lxd/lxd/state" "github.com/canonical/lxd/lxd/task" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" "github.com/canonical/lxd/shared/logger" "github.com/canonical/lxd/shared/version" ) @@ -44,18 +44,23 @@ var warningCmd = APIEndpoint{ 
Delete: APIEndpointAction{Handler: warningDelete}, } -func filterWarnings(warnings []api.Warning, clauses []filter.Clause) []api.Warning { +func filterWarnings(warnings []api.Warning, clauses *filter.ClauseSet) ([]api.Warning, error) { filtered := []api.Warning{} for _, warning := range warnings { - if !filter.Match(warning, clauses) { + match, err := filter.Match(warning, *clauses) + if err != nil { + return nil, err + } + + if !match { continue } filtered = append(filtered, warning) } - return filtered + return filtered, nil } // swagger:operation GET /1.0/warnings warnings warnings_get @@ -156,14 +161,10 @@ func warningsGet(d *Daemon, r *http.Request) response.Response { } // Parse filter value - var clauses []filter.Clause - filterStr := r.FormValue("filter") - if filterStr != "" { - clauses, err = filter.Parse(filterStr) - if err != nil { - return response.SmartError(fmt.Errorf("Failed to filter warnings: %w", err)) - } + clauses, err := filter.Parse(filterStr, filter.QueryOperatorSet()) + if err != nil { + return response.SmartError(fmt.Errorf("Failed to filter warnings: %w", err)) } // Parse the project field @@ -199,10 +200,16 @@ func warningsGet(d *Daemon, r *http.Request) response.Response { return response.SmartError(err) } + var filters []api.Warning if recursion == 0 { var resultList []string - for _, w := range filterWarnings(warnings, clauses) { + filters, err = filterWarnings(warnings, clauses) + if err != nil { + return response.SmartError(err) + } + + for _, w := range filters { url := fmt.Sprintf("/%s/warnings/%s", version.APIVersion, w.UUID) resultList = append(resultList, url) } @@ -210,8 +217,15 @@ func warningsGet(d *Daemon, r *http.Request) response.Response { return response.SyncResponse(true, resultList) } + if filters == nil { + filters, err = filterWarnings(warnings, clauses) + if err != nil { + return response.SmartError(err) + } + } + // Return detailed list of warning - return response.SyncResponse(true, filterWarnings(warnings, 
clauses)) + return response.SyncResponse(true, filters) } // swagger:operation GET /1.0/warnings/{uuid} warnings warning_get diff --git a/shared/filter/clause_test.go b/shared/filter/clause_test.go index bd8f18f7404c..901fff125ccc 100644 --- a/shared/filter/clause_test.go +++ b/shared/filter/clause_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/canonical/lxd/lxd/filter" + "github.com/canonical/lxd/shared/filter" ) func TestParse_Error(t *testing.T) { diff --git a/shared/filter/match_test.go b/shared/filter/match_test.go index dcd00c031490..c6a69fc63c17 100644 --- a/shared/filter/match_test.go +++ b/shared/filter/match_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" ) func TestMatch_Instance(t *testing.T) { diff --git a/shared/filter/value_test.go b/shared/filter/value_test.go index 7a45a5611356..4f0bdd50111a 100644 --- a/shared/filter/value_test.go +++ b/shared/filter/value_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/canonical/lxd/lxd/filter" "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/filter" ) func TestValueOf_Instance(t *testing.T) { From edaedcedb6c32c09ed2685c7db919ad4473b0b42 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 22 Jun 2023 12:11:30 +0100 Subject: [PATCH 082/543] lxd/instance/drivers/driver/qemu: Fix VMs on filesystems that do not support direct I/O Closes #11862 Signed-off-by: Thomas Parrott --- lxd/instance/drivers/driver_qemu.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 81e562c52fda..e3e524a289fa 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -3418,13 
+3418,20 @@ func (d *qemu) addDriveConfig(bootIndexes map[string]int, driveConf deviceConfig // If backing FS is ZFS or BTRFS, avoid using direct I/O and use host page cache only. // We've seen ZFS lock up and BTRFS checksum issues when using direct I/O on image files. if fsType == "zfs" || fsType == "btrfs" { - if driveConf.FSType != "iso9660" { - // Only warn about using writeback cache if the drive image is writable. - d.logger.Warn("Using writeback cache I/O", logger.Ctx{"device": driveConf.DevName, "devPath": srcDevPath, "fsType": fsType}) - } - aioMode = "threads" cacheMode = "writeback" // Use host cache, with neither O_DSYNC nor O_DIRECT semantics. + } else { + // Use host cache, with neither O_DSYNC nor O_DIRECT semantics if filesystem + // doesn't support Direct I/O. + _, err := os.OpenFile(srcDevPath, unix.O_DIRECT|unix.O_RDONLY, 0) + if err != nil { + cacheMode = "writeback" + } + } + + if cacheMode == "writeback" && driveConf.FSType != "iso9660" { + // Only warn about using writeback cache if the drive image is writable. + d.logger.Warn("Using writeback cache I/O", logger.Ctx{"device": driveConf.DevName, "devPath": srcDevPath, "fsType": fsType}) } // Special case ISO images as cdroms. 
From 2a2e8d49743401a4567204e519238f6a212abce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Fri, 16 Jun 2023 12:16:12 +0200 Subject: [PATCH 083/543] lxd/shared: Add utility function to parse the target query param MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- shared/util.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/shared/util.go b/shared/util.go index 8a9a7fdd4efb..b7c763b54478 100644 --- a/shared/util.go +++ b/shared/util.go @@ -1368,3 +1368,15 @@ func JoinTokenDecode(input string) (*api.ClusterMemberJoinToken, error) { return &j, nil } + +// TargetDetect returns either target node or group based on the provided prefix: +// An invocation with `target=h1` returns "h1", "" and `target=@g1` returns "", "g1". +func TargetDetect(target string) (targetNode string, targetGroup string) { + if strings.HasPrefix(target, "@") { + targetGroup = strings.TrimPrefix(target, "@") + } else { + targetNode = target + } + + return +} From e0f20325fd61b9a6ced3a79c5551ac4a115814a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 13 Jun 2023 11:42:27 +0200 Subject: [PATCH 084/543] lxd/project: Add functions to validate projects cluster member and group restrictions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/project/permissions.go | 111 +++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/lxd/project/permissions.go b/lxd/project/permissions.go index 58848955d996..796982c59a5a 100644 --- a/lxd/project/permissions.go +++ b/lxd/project/permissions.go @@ -1513,3 +1513,114 @@ func AllowSnapshotCreation(p *api.Project) error { return nil } + +// GetRestrictedClusterGroups returns a slice of restricted cluster groups for the given project. 
+func GetRestrictedClusterGroups(p *api.Project) []string { + return shared.SplitNTrimSpace(p.Config["restricted.cluster.groups"], ",", -1, true) +} + +// AllowClusterMember returns nil if the given project is allowed to use the cluster member. +func AllowClusterMember(p *api.Project, member *db.NodeInfo) error { + clusterGroupsAllowed := GetRestrictedClusterGroups(p) + + if shared.IsTrue(p.Config["restricted"]) && len(clusterGroupsAllowed) > 0 { + for _, memberGroupName := range member.Groups { + if shared.StringInSlice(memberGroupName, clusterGroupsAllowed) { + return nil + } + } + + return fmt.Errorf("Project isn't allowed to use this cluster member: %q", member.Name) + } + + return nil +} + +// AllowClusterGroup returns nil if the given project is allowed to use the cluster groupName. +func AllowClusterGroup(p *api.Project, groupName string) error { + clusterGroupsAllowed := GetRestrictedClusterGroups(p) + + // Skip the check if the project is not restricted + if shared.IsFalseOrEmpty(p.Config["restricted"]) { + return nil + } + + if len(clusterGroupsAllowed) > 0 && !shared.StringInSlice(groupName, clusterGroupsAllowed) { + return fmt.Errorf("Project isn't allowed to use this cluster group: %q", groupName) + } + + return nil +} + +// CheckTargetMember checks if the given targetMemberName is present in allMembers +// and is allowed for the project. +// If the target member is allowed it returns the resolved node information. +func CheckTargetMember(p *api.Project, targetMemberName string, allMembers []db.NodeInfo) (*db.NodeInfo, error) { + // Find target member. + for _, potentialMember := range allMembers { + if potentialMember.Name == targetMemberName { + // If restricted groups are specified then check member is in at least one of them. 
+ err := AllowClusterMember(p, &potentialMember) + if err != nil { + return nil, api.StatusErrorf(http.StatusForbidden, err.Error()) + } + + return &potentialMember, nil + } + } + + return nil, api.StatusErrorf(http.StatusNotFound, "Cluster member %q not found", targetMemberName) +} + +// CheckTargetGroup checks if the given groupName is allowed for the project. +func CheckTargetGroup(ctx context.Context, tx *db.ClusterTx, p *api.Project, groupName string) error { + // If restricted groups are specified then check the requested group is in the list. + err := AllowClusterGroup(p, groupName) + if err != nil { + return api.StatusErrorf(http.StatusForbidden, err.Error()) + } + + // Check if the target group exists. + targetGroupExists, err := tx.ClusterGroupExists(groupName) + if err != nil { + return err + } + + if !targetGroupExists { + return api.StatusErrorf(http.StatusBadRequest, "Cluster group %q doesn't exist", groupName) + } + + return nil +} + +// CheckTarget checks if the given cluster target (member or group) is allowed. +// If target is a cluster member and is found in allMembers it returns the resolved node information object. +// If target is a cluster group it returns the cluster group name. +// In case of error, neither node information nor cluster group name gets returned. +func CheckTarget(ctx context.Context, r *http.Request, tx *db.ClusterTx, p *api.Project, target string, allMembers []db.NodeInfo) (*db.NodeInfo, string, error) { + targetMemberName, targetGroupName := shared.TargetDetect(target) + + // Check manual cluster member targeting restrictions. 
+ err := CheckClusterTargetRestriction(r, p, target) + if err != nil { + return nil, "", err + } + + if targetMemberName != "" { + member, err := CheckTargetMember(p, targetMemberName, allMembers) + if err != nil { + return nil, "", err + } + + return member, "", nil + } else if targetGroupName != "" { + err := CheckTargetGroup(ctx, tx, p, targetGroupName) + if err != nil { + return nil, "", err + } + + return nil, targetGroupName, nil + } + + return nil, "", nil +} From a6b2a074f567e363b2c4271215dd3e71d645e42e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 19 Jun 2023 17:02:17 +0200 Subject: [PATCH 085/543] lxd/db: Return a not found error if there isn't any node with least amount of instances MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Depending on the given list, a node might not be available for selection. Signed-off-by: Julian Pelizäus --- lxd/db/node.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lxd/db/node.go b/lxd/db/node.go index b1e5c5af1f8c..41f0224d8b85 100644 --- a/lxd/db/node.go +++ b/lxd/db/node.go @@ -1139,6 +1139,10 @@ func (c *ClusterTx) GetNodeWithLeastInstances(ctx context.Context, members []Nod } } + if member == nil { + return nil, api.StatusErrorf(http.StatusNotFound, "No suitable cluster member could be found") + } + return member, nil } From 36d76e646ada115bb0cec6676e6746c864b46f28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 21 Jun 2023 12:30:05 +0200 Subject: [PATCH 086/543] lxd/api/cluster: Handle error if no evacuation target can be found MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/api_cluster.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go index 863e1af25791..5f01d041a1bb 100644 --- a/lxd/api_cluster.go +++ b/lxd/api_cluster.go @@ -3124,13 +3124,11 @@ func 
evacuateInstances(ctx context.Context, opts evacuateOpts) error { targetMemberInfo, err := evacuateClusterSelectTarget(ctx, opts.s, opts.gateway, inst, candidateMembers) if err != nil { - return err - } - - // Skip migration if no target available. - if targetMemberInfo == nil { - l.Warn("No migration target available for instance") - continue + if api.StatusErrorCheck(err, http.StatusNotFound) { + // Skip migration if no target is available + l.Warn("No migration target available for instance") + continue + } } // Start migrating the instance. From a2ca756aaa107684f6091af10f30bd64a050f12c Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 13:03:46 +0100 Subject: [PATCH 087/543] .github/labeler: Bring into sync with main Signed-off-by: Thomas Parrott --- .github/labeler.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index f219e55c5361..0e4b213e1519 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -4,4 +4,3 @@ API: - shared/api/**/* Documentation: - doc/**/* -- .sphinx/**/* From b3986881891cb33ac5018bb83c56bb19ab4bb17d Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 13:06:30 +0100 Subject: [PATCH 088/543] github/workflows/tests: Bring into sync with main Signed-off-by: Thomas Parrott --- .github/workflows/tests.yml | 80 ++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 24 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f960d67a0995..56632e313998 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -12,11 +12,16 @@ concurrency: jobs: code-tests: + env: + CGO_CFLAGS: "-I/home/runner/work/lxd/lxd-test/vendor/raft/include/ -I/home/runner/work/lxd/lxd-test/vendor/dqlite/include/" + CGO_LDFLAGS: "-L/home/runner/work/lxd/lxd-test/vendor/raft/.libs/ -L/home/runner/work/lxd/lxd-test/vendor/dqlite/.libs/" + LD_LIBRARY_PATH: 
"/home/runner/work/lxd/lxd-test/vendor/raft/.libs/:/home/runner/work/lxd/lxd-test/vendor/dqlite/.libs/" + CGO_LDFLAGS_ALLOW: "(-Wl,-wrap,pthread_create)|(-Wl,-z,now)" name: Code tests runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Dependency Review uses: actions/dependency-review-action@v3 @@ -30,17 +35,16 @@ jobs: - name: Install dependencies run: | sudo add-apt-repository ppa:ubuntu-lxc/lxc-git-master -y --no-update - sudo add-apt-repository ppa:dqlite/dev -y --no-update sudo apt-get update sudo apt-get install --no-install-recommends -y \ + build-essential \ curl \ gettext \ git \ libacl1-dev \ libcap-dev \ libdbus-1-dev \ - libdqlite-dev \ liblxc-dev \ lxc-templates \ libseccomp-dev \ @@ -48,17 +52,30 @@ jobs: libsqlite3-dev \ libtool \ libudev-dev \ + libuv1-dev \ make \ pkg-config \ shellcheck python3 -m pip install flake8 - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2 - name: Download go dependencies run: | go mod download + - name: Make LXD tarball and unpack it + env: + CUSTOM_VERSION: "test" + run: | + make dist + tar -xzf lxd-test.tar.gz -C ~/work/lxd/ + rm lxd-test.tar.gz + + - name: Build LXD dependencies + run: | + cd ~/work/lxd/lxd-test + make deps + - name: Run LXD build run: | make @@ -69,7 +86,7 @@ jobs: - name: Unit tests (all) run: | - sudo go test ./... + sudo --preserve-env=CGO_CFLAGS,CGO_LDFLAGS,CGO_LDFLAGS_ALLOW,LD_LIBRARY_PATH LD_LIBRARY_PATH=${LD_LIBRARY_PATH} go test ./... 
system-tests: env: @@ -77,6 +94,7 @@ jobs: LXD_SHIFTFS_DISABLE: "true" LXD_CEPH_CLUSTER: "ceph" LXD_CEPH_CEPHFS: "cephfs" + LXD_CONCURRENT: "1" LXD_VERBOSE: "1" LXD_OFFLINE: "1" LXD_TMPFS: "1" @@ -104,7 +122,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Go (stable) uses: actions/setup-go@v4 @@ -114,7 +132,7 @@ jobs: - name: Install Go (tip) run: | - curl -sL https://storage.googleapis.com/go-build-snap/go/linux-amd64/$(git ls-remote https://github.com/golang/go.git HEAD | awk '{print $1;}').tar.gz -o gotip.tar.gz + curl -sSfL https://storage.googleapis.com/go-build-snap/go/linux-amd64/$(git ls-remote https://github.com/golang/go.git HEAD | awk '{print $1;}').tar.gz -o gotip.tar.gz ls -lah gotip.tar.gz mkdir -p ~/sdk/gotip tar -C ~/sdk/gotip -xzf gotip.tar.gz @@ -130,13 +148,16 @@ jobs: sudo apt-get update sudo snap remove lxd --purge + sudo snap remove core20 --purge || true sudo apt-get autopurge moby-containerd docker uidmap -y sudo ip link delete docker0 sudo nft flush ruleset + sudo systemctl mask lxc.service + sudo systemctl mask lxc-net.service + sudo apt-get install --no-install-recommends -y \ curl \ - dnsutils \ git \ libacl1-dev \ libcap-dev \ @@ -156,8 +177,10 @@ jobs: btrfs-progs \ busybox-static \ dnsmasq-base \ + easy-rsa \ gettext \ jq \ + lxc-utils \ lvm2 \ nftables \ quota \ @@ -173,6 +196,10 @@ jobs: xfsprogs \ xz-utils \ zfsutils-linux + + # reclaim some space + sudo apt-get clean + mkdir -p "$(go env GOPATH)/bin" - name: Download go dependencies @@ -187,7 +214,18 @@ jobs: if: ${{ matrix.backend == 'ceph' }} run: | set -x - sudo apt-get install --no-install-recommends -y snapd ceph-common + + # If the rootfs and the ephemeral part are on the same physical disk, giving the whole + # disk to microceph would wipe our rootfs. Since it is pretty rare for GitHub Action + # runners to have a single disk, we immediately bail rather than trying to gracefully + # handle it. 
Once snapd releases with https://github.com/snapcore/snapd/pull/13150, + # we will be able to stop worrying about that special case. + if [ "$(stat -c '%d' /)" = "$(stat -c '%d' /mnt)" ]; then + echo "FAIL: rootfs and ephemeral part on the same disk, aborting" + exit 1 + fi + + sudo apt-get install --no-install-recommends -y ceph-common sudo snap install microceph --edge sleep 5 sudo microceph cluster bootstrap @@ -199,17 +237,11 @@ jobs: for flag in nosnaptrim noscrub nobackfill norebalance norecover noscrub nodeep-scrub; do sudo microceph.ceph osd set $flag done - # Use ephemeral disk mounted on /mnt for ceph OSD. - # The block-devices plug doesn't allow accessing /dev/loopX devices so we make those same devices - # available under alternate names (/dev/sdiY) that are not used inside GitHub Action runners. + # Repurpose the ephemeral disk for ceph OSD. sudo swapoff /mnt/swapfile - sudo rm -f /mnt/swapfile - loop_file="/mnt/ceph-osd.img" - sudo fallocate -l 10G "${loop_file}" - loop_dev="$(sudo losetup --show --direct-io=on --nooverlap -f "${loop_file}")" - devInfo=($(sudo stat -c '%t %T' "${loop_dev}")) - sudo mknod -m 0660 /dev/sdia b 0x"${devInfo[0]}" 0x"${devInfo[1]}" - sudo microceph disk add --wipe /dev/sdia + ephemeral_disk="$(findmnt --noheadings --output SOURCE --target /mnt | sed 's/[0-9]\+$//')" + sudo umount /mnt + sudo microceph disk add --wipe "${ephemeral_disk}" sudo rm -rf /etc/ceph sudo ln -s /var/snap/microceph/current/conf/ /etc/ceph sudo microceph enable rgw @@ -226,7 +258,7 @@ jobs: chmod +x ~ echo "root:1000000:1000000000" | sudo tee /etc/subuid /etc/subgid cd test - sudo --preserve-env=PATH,GOPATH,LXD_VERBOSE,LXD_BACKEND,LXD_CEPH_CLUSTER,LXD_CEPH_CEPHFS,LXD_CEPH_CEPHOBJECT_RADOSGW,LXD_OFFLINE,LXD_SKIP_TESTS,LXD_REQUIRED_TESTS,LXD_SHIFTFS_DISABLE LXD_BACKEND=${{ matrix.backend }} ./main.sh ${{ matrix.suite }} + sudo 
--preserve-env=PATH,GOPATH,GITHUB_ACTIONS,LXD_VERBOSE,LXD_BACKEND,LXD_CEPH_CLUSTER,LXD_CEPH_CEPHFS,LXD_CEPH_CEPHOBJECT_RADOSGW,LXD_OFFLINE,LXD_SKIP_TESTS,LXD_REQUIRED_TESTS,LXD_SHIFTFS_DISABLE LXD_BACKEND=${{ matrix.backend }} ./main.sh ${{ matrix.suite }} client: name: Client tests @@ -243,7 +275,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Go uses: actions/setup-go@v4 @@ -294,7 +326,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -313,7 +345,7 @@ jobs: uses: get-woke/woke-action@v0 with: fail-on-error: true - woke-args: "*.md **/*.md -c https://github.com/canonical-web-and-design/Inclusive-naming/raw/main/config.yml" + woke-args: "*.md **/*.md -c https://github.com/canonical/Inclusive-naming/raw/main/config.yml" - name: Run link checker run: | @@ -339,7 +371,7 @@ jobs: if: ${{ github.repository == 'canonical/lxd' && github.event_name == 'push'}} steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Launchpad SSH access env: From af9ec2acad34fcb4fcdcf6417123f062115def6b Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 13:16:29 +0100 Subject: [PATCH 089/543] shared/cmd/table/test: Fix imports Signed-off-by: Thomas Parrott --- shared/cmd/table_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/cmd/table_test.go b/shared/cmd/table_test.go index 53a4d5ac5f71..6be20556e15b 100644 --- a/shared/cmd/table_test.go +++ b/shared/cmd/table_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/suite" - "github.com/lxc/lxd/shared/api" + "github.com/canonical/lxd/shared/api" ) type tableSuite struct { From 598081f706d2d1413b8e865e76c58fe0dc4882d1 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 13:14:29 +0100 Subject: [PATCH 090/543] gomod: Update deps Pin github.com/mdlayher/socket to 
v0.4.1 for Go 1.18 compat. Signed-off-by: Thomas Parrott --- go.mod | 42 ++++++------- go.sum | 184 ++++++++++++++++----------------------------------------- 2 files changed, 72 insertions(+), 154 deletions(-) diff --git a/go.mod b/go.mod index c686602f6765..bfbf3d581bf6 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/canonical/candid v1.12.2 github.com/canonical/go-dqlite v1.20.0 github.com/checkpoint-restore/go-criu/v6 v6.3.0 - github.com/digitalocean/go-qemu v0.0.0-20221209210016-f035778c97f7 + github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e github.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49 github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 @@ -30,31 +30,32 @@ require ( github.com/mdlayher/ndp v1.0.1 github.com/mdlayher/netx v0.0.0-20230430222610-7e21880baee8 github.com/mdlayher/vsock v1.2.1 - github.com/miekg/dns v1.1.55 - github.com/minio/minio-go/v7 v7.0.59 + github.com/miekg/dns v1.1.56 + github.com/minio/minio-go/v7 v7.0.63 github.com/olekukonko/tablewriter v0.0.5 - github.com/osrg/gobgp/v3 v3.16.0 + github.com/osrg/gobgp/v3 v3.18.0 github.com/pborman/uuid v1.2.1 - github.com/pkg/sftp v1.13.5 + github.com/pkg/sftp v1.13.6 github.com/pkg/xattr v0.4.9 github.com/robfig/cron/v3 v3.0.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - golang.org/x/crypto v0.11.0 + golang.org/x/crypto v0.13.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 - golang.org/x/term v0.10.0 - golang.org/x/text v0.11.0 + golang.org/x/sys v0.12.0 + golang.org/x/term v0.12.0 + golang.org/x/text v0.13.0 google.golang.org/protobuf v1.31.0 gopkg.in/juju/environschema.v1 v1.0.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 - k8s.io/utils v0.0.0-20230505201702-9f6742963106 + k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b ) require ( + cloud.google.com/go/compute v1.23.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect @@ -63,11 +64,10 @@ require ( github.com/eapache/channels v1.1.0 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/renameio v1.0.1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jkeiser/iter v0.0.0-20200628201005-c8aa0ae784d1 // indirect @@ -83,12 +83,12 @@ require ( github.com/juju/webbrowser v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/k-sone/critbitgo v1.4.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kr/fs v0.1.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -96,7 +96,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // 
indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -108,15 +108,15 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.16.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/tools v0.11.0 // indirect - google.golang.org/genproto v0.0.0-20230629202037-9506855d4529 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/grpc v1.56.2 // indirect + golang.org/x/net v0.15.0 // indirect + golang.org/x/tools v0.13.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + google.golang.org/grpc v1.58.2 // indirect gopkg.in/errgo.v1 v1.0.1 // indirect gopkg.in/httprequest.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 3158aabd305c..6ff9073686dc 100644 --- a/go.sum +++ b/go.sum @@ -20,15 +20,14 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery 
v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -63,8 +62,6 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/canonical/candid v1.12.2 h1:0hfZZ1qBpFCoCrirSeSdxmOWvQnCs+1cjpft9yRs0so= github.com/canonical/candid v1.12.2/go.mod h1:NiCD+Go6m2oxWcsfntU9t2Cs6uZbs8UaTRt3ySubaXU= -github.com/canonical/go-dqlite v1.11.7 h1:eS+jif6HJ4HzKatQePvQggZI6xQukzg94zC9qLiGVgA= -github.com/canonical/go-dqlite v1.11.7/go.mod h1:Dwp/03G1r4dtc+QSB9tV84RAAS7Mc6gy7tEvzCgcuQ8= github.com/canonical/go-dqlite v1.20.0 h1:pnkn0oS0hPXWeODjvjWONKGb5KYh8kK0aruDPzZLwmU= github.com/canonical/go-dqlite v1.20.0/go.mod h1:Uvy943N8R4CFUAs59A1NVaziWY9nJ686lScY7ywurfg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -81,7 +78,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -98,15 +94,13 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/digitalocean/go-libvirt v0.0.0-20221205150000-2939327a8519 h1:OpkN/n40cmKenDQS+IOAeW9DLhYy4DADSeZnouCEV/E= github.com/digitalocean/go-libvirt v0.0.0-20221205150000-2939327a8519/go.mod h1:WyJJyfmJ0gWJvjV+ZH4DOgtOYZc1KOvYyBXWCLKxsUU= -github.com/digitalocean/go-qemu v0.0.0-20221209210016-f035778c97f7 h1:3OVJAbR131SnAXao7c9w8bFlAGH0oa29DCwsa88MJGk= -github.com/digitalocean/go-qemu v0.0.0-20221209210016-f035778c97f7/go.mod h1:K4+o74YGNjOb9N6yyG+LPj1NjHtk+Qz0IYQPvirbaLs= +github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e h1:x5PInTuXLddHWHlePCNAcM8QtUfOGx44f3UmYPMtDcI= +github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e/go.mod h1:K4+o74YGNjOb9N6yyG+LPj1NjHtk+Qz0IYQPvirbaLs= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/duo-labs/webauthn v0.0.0-20220815211337-00c9fb5711f5 h1:BaeJtFDlto/NjX9t730OebRRJf2P+t9YEDz3ur18824= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod 
h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 h1:90Ly+6UfUypEF6vvvW5rQIv9opIL8CbmW9FT20LDQoY= -github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= github.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49 h1:6SNWi8VxQeCSwmLuTbEvJd7xvPmdS//zvMBWweZLgck= github.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k= @@ -119,11 +113,9 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 
h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE= github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM= @@ -132,7 +124,6 @@ github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBav github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -141,7 +132,6 @@ github.com/fullstorydev/grpcurl v1.8.1 h1:Pp648wlTTg3OKySeqxM5pzh8XF6vLqrm8wRq66 github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -230,8 +220,8 @@ github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -273,7 +263,6 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/j-keck/arping v1.0.3 h1:aeVk5WnsK6xPaRsFt5wV6W2x5l/n5XBNp0MMr/FEv2k= @@ -312,8 +301,6 @@ github.com/juju/cmd/v3 v3.0.0-20220202061353-b1cc80b193b0/go.mod h1:EoGJiEG+vbMw github.com/juju/collections v0.0.0-20200605021417-0d0ec82b7271/go.mod h1:5XgO71dV1JClcOJE+4dzdn4HrI5LiyKd7PlVG6eZYhY= github.com/juju/collections v0.0.0-20220203020748-febd7cad8a7a/go.mod h1:JWeZdyttIEbkR51z2S13+J+aCuHVe0F6meRy+P0YGDo= github.com/juju/collections v1.0.0/go.mod h1:JWeZdyttIEbkR51z2S13+J+aCuHVe0F6meRy+P0YGDo= -github.com/juju/collections v1.0.2 
h1:y9t99Nq/uUZksJgWehiWxIr2vB1UG3hUT7LBNy1xiH8= -github.com/juju/collections v1.0.2/go.mod h1:kYJowQZYtHDvYDfZOvgf3Mt7mjKYwm/k1nqnJoMYOUc= github.com/juju/collections v1.0.4 h1:GjL+aN512m2rVDqhPII7P6qB0e+iYFubz8sqBhZaZtk= github.com/juju/collections v1.0.4/go.mod h1:hVrdB0Zwq9wIU1Fl6ItD2+UETeNeOEs+nGvJufVe+0c= github.com/juju/errors v0.0.0-20150916125642-1b5e39b83d18/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= @@ -392,14 +379,9 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= -github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -414,8 
+396,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lxc/go-lxc v0.0.0-20220627182551-ad3d9f7cb822 h1:AYlgPPJzpZFA7zNWOzjUTpXZODB+j8T1M/k7K9E/WwQ= -github.com/lxc/go-lxc v0.0.0-20220627182551-ad3d9f7cb822/go.mod h1:ZSmlCm3aSNy4awYsId5tsGQUrB1MGd1Y8UTsf4DXNbI= github.com/lxc/go-lxc v0.0.0-20230621012608-be98af2b8b9f h1:998HYS7wwE0Rj+ttuSrj+G8hNBkx/7OwEYt+nTyPKdk= github.com/lxc/go-lxc v0.0.0-20230621012608-be98af2b8b9f/go.mod h1:663nEzGHUBfl31PNYjvYAjhhJ91i/TPsCnvqqiZKHp0= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -437,50 +417,32 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= 
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/mdlayher/ndp v1.0.0 h1:rcFaJVj04Rj47ZlV/t3iZcuKzlpwBuBsD3gR9AHDzcI= -github.com/mdlayher/ndp v1.0.0/go.mod h1:+3vkk6YnlL8ZTRTjmQanCNQFqDKOpP2zNyHl2HqyoZs= github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= -github.com/mdlayher/netx v0.0.0-20220422152302-c711c2f8512f h1:h2oRsy/PeVMX4G3qQnlqBzRyKtjAowzHlp/qwvx+4oU= -github.com/mdlayher/netx v0.0.0-20220422152302-c711c2f8512f/go.mod h1:fwIXeygydjUR2l8Be1+G0PJSwHI4MpG5muNgH3qJqeQ= github.com/mdlayher/netx v0.0.0-20230430222610-7e21880baee8 h1:HMgSn3c16SXca3M+n6fLK2hXJLd4mhKAsZZh7lQfYmQ= github.com/mdlayher/netx v0.0.0-20230430222610-7e21880baee8/go.mod h1:qhZhwMDNWwZglKfwuWm0U9pCr/YKX1QAEwwJk9qfiTQ= -github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw= -github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= -github.com/mdlayher/vsock v1.2.0 
h1:klRY9lndjmg6k/QWbX/ucQ3e2JFRm1M7vfG9hijbQ0A= -github.com/mdlayher/vsock v1.2.0/go.mod h1:w4kdSTQB9p1l/WwGmAs0V62qQ869qRYoongwgN+Y1HE= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.52 h1:Bmlc/qsNNULOe6bpXcUTsuOajd0DzRHwup6D9k1An0c= -github.com/miekg/dns v1.1.52/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.49 h1:dE5DfOtnXMXCjr/HWI6zN9vCrY6Sv666qhhiwUMvGV4= -github.com/minio/minio-go/v7 v7.0.49/go.mod h1:UI34MvQEiob3Cf/gGExGMmzugkM/tNgbFypNDy5LMVc= -github.com/minio/minio-go/v7 v7.0.59 h1:lxIXwsTIcQkYoEG25rUJbzpmSB/oWeVDmxFo/uWUUsw= -github.com/minio/minio-go/v7 v7.0.59/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ= +github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -506,35 +468,29 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/osrg/gobgp/v3 v3.12.0 h1:coxnxOntqE1tKMM3a4ftBGe5ft/I5rklxlezISpBXx4= -github.com/osrg/gobgp/v3 v3.12.0/go.mod h1:rAPmqyijW79JIOkGu0BpLUCNdY769l7H+TlOKi5/5KY= -github.com/osrg/gobgp/v3 v3.16.0 h1:CqqFucQ9JS/iiJ2op1RtW8vtv1m8LHa1bwKUS2KxgiI= -github.com/osrg/gobgp/v3 v3.16.0/go.mod h1:tSUXn/s9uggSRTKP3IBeT5zI4ayOUX3O7fG5+n+SHPc= +github.com/osrg/gobgp/v3 v3.18.0 h1:/IbSvOv62lAsXXaIRLnGV6c8rzeZZZ1l/hNsmLaQ/EA= +github.com/osrg/gobgp/v3 v3.18.0/go.mod h1:tSUXn/s9uggSRTKP3IBeT5zI4ayOUX3O7fG5+n+SHPc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= @@ -549,8 +505,6 @@ github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -559,8 +513,6 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -570,14 +522,10 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod 
h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= @@ -585,8 +533,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -601,14 +547,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 
h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= @@ -632,15 +575,11 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= go.etcd.io/etcd/client/v2 
v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v2 v2.305.6 h1:fIDR0p4KMjw01MJMfUIDWdQbjo06PD6CeYM5z4EHLi0= go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= -go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E= go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0 h1:odMFuQQCg0UmPd7Cyw6TViRYv9ybGuXuki4CusDSzqA= go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0 h1:3yLUEC0nFCxw/RArImOyRUI4OAFbg4PFpBbAhSNzKNY= @@ -678,10 +617,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -718,8 +656,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -766,10 +702,9 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -782,8 +717,7 @@ 
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -796,8 +730,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -852,26 +784,22 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -881,10 +809,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -943,10 +870,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1024,12 +949,11 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 h1:VmCWItVXcKboEMCwZaWge+1JLiTCQSngZeINF+wzO+g= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= 
-google.golang.org/genproto v0.0.0-20230629202037-9506855d4529 h1:9JucMWR7sPvCxUFd6UsOUNmA5kCcWOfORaT3tpAsKQs= -google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130 h1:2FZP5XuJY9zQyGM5N0rtovnoXjiMUEIUMvw0m9wlpLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1050,10 +974,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= 
-google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1067,8 +989,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1120,10 +1040,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= -k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= launchpad.net/xmlpath v0.0.0-20130614043138-000000000004/go.mod h1:vqyExLOM3qBx7mvYRkoxjSCF945s0mbe7YynlKYXtsA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From 91a7c3930d13c1f39a606ce8e743066dc88975c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 19 Jun 2023 17:10:02 +0200 Subject: [PATCH 091/543] lxd/api: Restructure the cluster permission checks and use central functions for the instances endoint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/instances_post.go | 77 +++++-------------------------------------- 1 file changed, 9 insertions(+), 68 deletions(-) diff --git a/lxd/instances_post.go b/lxd/instances_post.go index e94ff2205853..165294f497bd 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -851,7 +851,6 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { var sourceInst *dbCluster.Instance var sourceImage *api.Image var sourceImageRef string - var clusterGroupsAllowed []string var candidateMembers []db.NodeInfo var targetMemberInfo *db.NodeInfo @@ -861,13 +860,6 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { return api.StatusErrorf(http.StatusBadRequest, "Target only allowed when clustered") } - var targetMember, targetGroup string - if strings.HasPrefix(target, "@") { - targetGroup = 
strings.TrimPrefix(target, "@") - } else { - targetMember = target - } - dbProject, err := dbCluster.GetProject(ctx, tx.Tx(), targetProjectName) if err != nil { return fmt.Errorf("Failed loading project: %w", err) @@ -878,64 +870,19 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { return err } + var targetGroupName string var allMembers []db.NodeInfo if clustered && !clusterNotification { - clusterGroupsAllowed = shared.SplitNTrimSpace(targetProject.Config["restricted.cluster.groups"], ",", -1, true) - - // Check manual cluster member targeting restrictions. - err = project.CheckClusterTargetRestriction(r, targetProject, target) - if err != nil { - return err - } - allMembers, err = tx.GetNodes(ctx) if err != nil { return fmt.Errorf("Failed getting cluster members: %w", err) } - if targetMember != "" { - // Find target member. - for i := range allMembers { - if allMembers[i].Name == targetMember { - targetMemberInfo = &allMembers[i] - break - } - } - - if targetMemberInfo == nil { - return api.StatusErrorf(http.StatusNotFound, "Cluster member not found") - } - - // If restricted groups are specified then check member is in at least one of them. - if shared.IsTrue(targetProject.Config["restricted"]) && len(clusterGroupsAllowed) > 0 { - found := false - for _, memberGroupName := range targetMemberInfo.Groups { - if shared.StringInSlice(memberGroupName, clusterGroupsAllowed) { - found = true - break - } - } - - if !found { - return api.StatusErrorf(http.StatusForbidden, "Project isn't allowed to use this cluster member") - } - } - } else if targetGroup != "" { - // If restricted groups are specified then check the requested group is in the list. - if shared.IsTrue(targetProject.Config["restricted"]) && len(clusterGroupsAllowed) > 0 && !shared.StringInSlice(targetGroup, clusterGroupsAllowed) { - return api.StatusErrorf(http.StatusForbidden, "Project isn't allowed to use this cluster group") - } - - // Check if the target group exists. 
- targetGroupExists, err := tx.ClusterGroupExists(targetGroup) - if err != nil { - return err - } - - if !targetGroupExists { - return api.StatusErrorf(http.StatusBadRequest, "Cluster group %q doesn't exist", targetGroup) - } + // Check if the given target is allowed and try to resolve the right member or group + targetMemberInfo, targetGroupName, err = project.CheckTarget(ctx, r, tx, targetProject, target, allMembers) + if err != nil { + return err } } @@ -1081,7 +1028,9 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { } } - candidateMembers, err = tx.GetCandidateMembers(ctx, allMembers, architectures, targetGroup, clusterGroupsAllowed, s.GlobalConfig.OfflineThreshold()) + clusterGroupsAllowed := project.GetRestrictedClusterGroups(targetProject) + + candidateMembers, err = tx.GetCandidateMembers(ctx, allMembers, architectures, targetGroupName, clusterGroupsAllowed, s.GlobalConfig.OfflineThreshold()) if err != nil { return err } @@ -1114,15 +1063,7 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { if targetMemberInfo == nil { err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { targetMemberInfo, err = tx.GetNodeWithLeastInstances(ctx, candidateMembers) - if err != nil { - return err - } - - if targetMemberInfo == nil { - return api.StatusErrorf(http.StatusBadRequest, "No suitable cluster member could be found") - } - - return nil + return err }) if err != nil { return response.SmartError(err) From 83cb437706b64403d11c949fe505e712d171b401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 20 Jun 2023 13:22:44 +0200 Subject: [PATCH 092/543] lxd/api: Allow the basic selection of cluster groups when moving instances MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When supplying `--target` use the @ character followed by a cluster group name like it is already implemented for the creation of new instances. 
Selects the cluster group member based on scheduler hints or falls back to choosing the member with the least amount of instances. In addition the comments were updated to use the term instances instead of containers. Alongside this change the `targetMemberInfo` variable is introduced which is also present for the instances creation endpoint and used to track the target node information during execution if a concrete target was selected or could be resolved from the given group. Signed-off-by: Julian Pelizäus --- lxd/instance_post.go | 212 +++++++++++++++++++++++++------------------ 1 file changed, 124 insertions(+), 88 deletions(-) diff --git a/lxd/instance_post.go b/lxd/instance_post.go index bcecadc6bdeb..82b7c5298972 100644 --- a/lxd/instance_post.go +++ b/lxd/instance_post.go @@ -87,108 +87,88 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return response.BadRequest(fmt.Errorf("Invalid instance name")) } - targetNode := queryParam(r, "target") - // Flag indicating whether the node running the container is offline. sourceNodeOffline := false - // Flag indicating whether the node the container should be moved to is - // online (only relevant if "?target=" was given). - targetNodeOffline := false - - // A POST to /containers/?target= is meant to be used to - // move a container from one node to another within a cluster. - if targetNode != "" { - // Determine if either the source node (the one currently - // running the container) or the target node are offline. - // - // If the target node is offline, we return an error. - // - // If the source node is offline and the container is backed by - // ceph, we'll just assume that the container is not running - // and it's safe to move it. - // - // TODO: add some sort of "force" flag to the API, to signal - // that the user really wants to move the container even - // if we can't know for sure that it's indeed not - // running? 
- err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - p, err := dbCluster.GetProject(ctx, tx.Tx(), projectName) - if err != nil { - return fmt.Errorf("Failed loading project: %w", err) - } - - apiProject, err := p.ToAPI(ctx, tx.Tx()) - if err != nil { - return err - } - - // Check if user is allowed to use cluster member targeting - err = project.CheckClusterTargetRestriction(r, apiProject, targetNode) - if err != nil { - return err - } - - // Load target node. - node, err := tx.GetNodeByName(ctx, targetNode) - if err != nil { - return fmt.Errorf("Failed to get target node: %w", err) - } - - targetNodeOffline = node.IsOffline(s.GlobalConfig.OfflineThreshold()) - - // Load source node. - address, err := tx.GetNodeAddressOfInstance(ctx, projectName, name, instanceType) - if err != nil { - return fmt.Errorf("Failed to get address of instance's member: %w", err) - } + // Check if clustered. + clustered, err := cluster.Enabled(s.DB.Node) + if err != nil { + return response.InternalError(fmt.Errorf("Failed checking cluster state: %w", err)) + } - if address == "" { - // Local node. - sourceNodeOffline = false - return nil - } + var targetProject *api.Project + var targetMemberInfo *db.NodeInfo + var candidateMembers []db.NodeInfo - node, err = tx.GetNodeByAddress(ctx, address) - if err != nil { - return fmt.Errorf("Failed to get source member for %s: %w", address, err) - } + target := queryParam(r, "target") + if !clustered && target != "" { + return response.BadRequest(fmt.Errorf("Target only allowed when clustered")) + } - sourceNodeOffline = node.IsOffline(s.GlobalConfig.OfflineThreshold()) + // A POST to /instances/?target= is meant to be used to + // move an instance from one member to another within a cluster. + // + // Determine if either the source node (the one currently + // running the instance) or the target node are offline. + // + // If the target node is offline, we return an error. 
+ // + // If the source node is offline and the instance is backed by + // ceph, we'll just assume that the instance is not running + // and it's safe to move it. + // + // TODO: add some sort of "force" flag to the API, to signal + // that the user really wants to move the instance even + // if we can't know for sure that it's indeed not + // running? + err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { + // Load source node. + sourceAddress, err := tx.GetNodeAddressOfInstance(ctx, projectName, name, instanceType) + if err != nil { + return fmt.Errorf("Failed to get address of instance's member: %w", err) + } + if sourceAddress == "" { + // Local node. + sourceNodeOffline = false return nil - }) + } + + sourceMemberInfo, err := tx.GetNodeByAddress(ctx, sourceAddress) if err != nil { - return response.SmartError(err) + return fmt.Errorf("Failed to get source member for %q: %w", sourceAddress, err) } - } - if targetNode != "" && targetNodeOffline { - return response.BadRequest(fmt.Errorf("Target node is offline")) + sourceNodeOffline = sourceMemberInfo.IsOffline(s.GlobalConfig.OfflineThreshold()) + + return nil + }) + if err != nil { + return response.SmartError(err) } // Check whether to forward the request to the node that is running the - // container. Here are the possible cases: + // instance. Here are the possible cases: // - // 1. No "?target=" parameter was passed. In this case this is - // just a container rename, with no move, and we want the request to be - // handled by the node which is actually running the container. + // 1. No "?target=" parameter was passed. In this case this is + // just an instance rename, with no move, and we want the request to be + // handled by the node which is actually running the instance. // - // 2. The "?target=" parameter was set and the node running the - // container is online. In this case we want to forward the request to + // 2. 
The "?target=" parameter was set and the node running the + // instance is online. In this case we want to forward the request to // that node, which might do things like unmapping the RBD volume for - // ceph containers. + // ceph instances. // - // 3. The "?target=" parameter was set but the node running the - // container is offline. We don't want to forward to the request to - // that node and we don't want to load the container here (since - // it's not a local container): we'll be able to handle the request - // at all only if the container is backed by ceph. We'll check for + // 3. The "?target=" parameter was set but the node running the + // instance is offline. We don't want to forward to the request to + // that node and we don't want to load the instance here (since + // it's not a local instance): we'll be able to handle the request + // at all only if the instance is backed by ceph. We'll check for // that just below. // // Cases 1. and 2. are the ones for which the conditional will be true - // and we'll either forward the request or load the container. - if targetNode == "" || !sourceNodeOffline { + // and we'll either forward the request or load the instance. + if target == "" || !sourceNodeOffline { // Handle requests targeted to a container on a different node. resp, err := forwardedResponseIfInstanceIsRemote(s, r, projectName, name, instanceType) if err != nil { @@ -206,6 +186,67 @@ func instancePost(d *Daemon, r *http.Request) response.Response { } } + inst, err := instance.LoadByProjectAndName(s, projectName, name) + if err != nil { + return response.SmartError(err) + } + + // Run the cluster placement after potentially forwarding the request to another member. 
+ if target != "" && clustered { + err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { + p, err := dbCluster.GetProject(ctx, tx.Tx(), projectName) + if err != nil { + return err + } + + targetProject, err = p.ToAPI(ctx, tx.Tx()) + if err != nil { + return err + } + + allMembers, err := tx.GetNodes(ctx) + if err != nil { + return fmt.Errorf("Failed getting cluster members: %w", err) + } + + var targetGroupName string + + targetMemberInfo, targetGroupName, err = project.CheckTarget(ctx, r, tx, targetProject, target, allMembers) + if err != nil { + return err + } + + if targetMemberInfo == nil { + clusterGroupsAllowed := project.GetRestrictedClusterGroups(targetProject) + + candidateMembers, err = tx.GetCandidateMembers(ctx, allMembers, []int{inst.Architecture()}, targetGroupName, clusterGroupsAllowed, s.GlobalConfig.OfflineThreshold()) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return response.SmartError(err) + } + + // If no member was selected yet, pick the member with the least number of instances. + if targetMemberInfo == nil { + err := s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { + targetMemberInfo, err = tx.GetNodeWithLeastInstances(ctx, candidateMembers) + return err + }) + if err != nil { + return response.SmartError(err) + } + } + + if targetMemberInfo.IsOffline(s.GlobalConfig.OfflineThreshold()) { + return response.BadRequest(fmt.Errorf("Target cluster member is offline")) + } + } + body, err := io.ReadAll(r.Body) if err != nil { return response.InternalError(err) @@ -232,11 +273,6 @@ func instancePost(d *Daemon, r *http.Request) response.Response { req.Live = true } - inst, err := instance.LoadByProjectAndName(s, projectName, name) - if err != nil { - return response.SmartError(err) - } - // If new instance name not supplied, assume it will be keeping its current name. 
if req.Name == "" { req.Name = inst.Name() @@ -288,7 +324,7 @@ func instancePost(d *Daemon, r *http.Request) response.Response { return operations.OperationResponse(op) } - if targetNode != "" { + if targetMemberInfo != nil { // Check if instance has backups. backups, err := s.DB.Cluster.GetInstanceBackups(projectName, name) if err != nil { @@ -301,7 +337,7 @@ func instancePost(d *Daemon, r *http.Request) response.Response { } run := func(op *operations.Operation) error { - return migrateInstance(s, r, inst, targetNode, req, op) + return migrateInstance(s, r, inst, targetMemberInfo.Name, req, op) } resources := map[string][]api.URL{} From 4f41e8cb91a8e25525676ccfa6768adafb5a7c68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 14 Jun 2023 08:54:51 +0200 Subject: [PATCH 093/543] tests/cluster: Add tests for instance movement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- test/main.sh | 1 + test/suites/clustering_move.sh | 141 +++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 test/suites/clustering_move.sh diff --git a/test/main.sh b/test/main.sh index 1e0da7a3a38d..c8d4c67c5b83 100755 --- a/test/main.sh +++ b/test/main.sh @@ -224,6 +224,7 @@ if [ "${1:-"all"}" != "standalone" ]; then run_test test_clustering_failure_domains "clustering failure domains" run_test test_clustering_image_refresh "clustering image refresh" run_test test_clustering_evacuation "clustering evacuation" + run_test test_clustering_move "clustering move" run_test test_clustering_edit_configuration "clustering config edit" run_test test_clustering_remove_members "clustering config remove members" run_test test_clustering_autotarget "clustering autotarget member" diff --git a/test/suites/clustering_move.sh b/test/suites/clustering_move.sh new file mode 100644 index 000000000000..dd0500d7e1e9 --- /dev/null +++ b/test/suites/clustering_move.sh @@ -0,0 +1,141 
@@ +test_clustering_move() { + # shellcheck disable=2039,3043,SC2034 + local LXD_DIR + + setup_clustering_bridge + prefix="lxd$$" + bridge="${prefix}" + + setup_clustering_netns 1 + LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_ONE_DIR}" + ns1="${prefix}1" + spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}" + + # Add a newline at the end of each line. YAML as weird rules.. + cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/cluster.crt") + + # Spawn a second node + setup_clustering_netns 2 + LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_TWO_DIR}" + ns2="${prefix}2" + spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}" + + # Spawn a third node + setup_clustering_netns 3 + LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_THREE_DIR}" + ns3="${prefix}3" + spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 1 "${LXD_THREE_DIR}" + + ensure_import_testimage + + # Preparation + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group create foobar1 + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group assign node1 foobar1,default + + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group create foobar2 + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group assign node2 foobar2,default + + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group create foobar3 + LXD_DIR="${LXD_ONE_DIR}" lxc cluster group assign node3 foobar3,default + + LXD_DIR="${LXD_ONE_DIR}" lxc init testimage c1 --target node1 + LXD_DIR="${LXD_ONE_DIR}" lxc init testimage c2 --target node2 + LXD_DIR="${LXD_ONE_DIR}" lxc init testimage c3 --target node3 + + # Perform default move tests falling back to the built in logic of choosing the node + # with the least number of instances when targeting a cluster group. 
+ LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target node2 + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target @foobar1 + LXD_DIR="${LXD_ONE_DIR}" lxc info c1 | grep -q "Location: node1" + + # c1 can be moved within the same cluster group if it has multiple members + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@default + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@default + + # c1 cannot be moved within the same cluster group if it has a single member + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@foobar3 + LXD_DIR="${LXD_ONE_DIR}" lxc info c1 | grep -q "Location: node3" + ! LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@foobar3 || false + + # Perform standard move tests using the `scheduler.instance` cluster member setting. + LXD_DIR="${LXD_ONE_DIR}" lxc cluster set node2 scheduler.instance=group + LXD_DIR="${LXD_ONE_DIR}" lxc cluster set node3 scheduler.instance=manual + + # At this stage we have: + # - node1 in group foobar1,default accepting all instances + # - node2 in group foobar2,default accepting group-only targeting + # - node3 in group foobar3,default accepting manual targeting only + # - c1 is deployed on node1 + # - c2 is deployed on node2 + # - c3 is deployed on node3 + + # c1 can be moved to node2 by group targeting. + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@foobar2 + LXD_DIR="${LXD_ONE_DIR}" lxc info c1 | grep -q "Location: node2" + + # c2 can be moved to node1 by manual targeting. + LXD_DIR="${LXD_ONE_DIR}" lxc move c2 --target=node1 + LXD_DIR="${LXD_ONE_DIR}" lxc info c2 | grep -q "Location: node1" + + # c1 cannot be moved to node3 by group targeting. + ! LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target=@foobar3 || false + + # c2 can be moved to node2 by manual targeting. + LXD_DIR="${LXD_ONE_DIR}" lxc move c2 --target=node2 + + # c3 can be moved to node1 by manual targeting. 
+ LXD_DIR="${LXD_ONE_DIR}" lxc move c3 --target=node1 + LXD_DIR="${LXD_ONE_DIR}" lxc info c3 | grep -q "Location: node1" + + # c3 can be moved back to node by by manual targeting. + LXD_DIR="${LXD_ONE_DIR}" lxc move c3 --target=node3 + LXD_DIR="${LXD_ONE_DIR}" lxc info c3 | grep -q "Location: node3" + + # Clean up + LXD_DIR="${LXD_ONE_DIR}" lxc cluster unset node2 scheduler.instance + LXD_DIR="${LXD_ONE_DIR}" lxc cluster unset node3 scheduler.instance + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target node1 + + # Perform project restriction tests. + # At this stage we have: + # - node1 in group foobar1,default + # - node2 in group foobar2,default + # - node3 in group foobar3,default + # - c1 is deployed on node1 + # - c2 is deployed on node2 + # - c3 is deployed on node3 + # - default project restricted to cluster groups foobar1,foobar2 + LXD_DIR="${LXD_ONE_DIR}" lxc project set default restricted=true + LXD_DIR="${LXD_ONE_DIR}" lxc project set default restricted.cluster.groups=foobar1,foobar2 + + # Moving to a node that is not a member of foobar1 or foobar2 will fail. + # The same applies for an unlisted group + ! LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target @foobar3 || false + ! 
LXD_DIR="${LXD_ONE_DIR}" lxc move c2 --target node3 || false + + # Moving instances in between the restricted groups + LXD_DIR="${LXD_ONE_DIR}" lxc move c1 --target node2 + LXD_DIR="${LXD_ONE_DIR}" lxc move c2 --target @foobar1 + LXD_DIR="${LXD_ONE_DIR}" lxc move c3 --target node1 + + # Cleanup + LXD_DIR="${LXD_ONE_DIR}" lxc delete -f c1 c2 c3 + + LXD_DIR="${LXD_THREE_DIR}" lxd shutdown + LXD_DIR="${LXD_TWO_DIR}" lxd shutdown + LXD_DIR="${LXD_ONE_DIR}" lxd shutdown + sleep 0.5 + rm -f "${LXD_THREE_DIR}/unix.socket" + rm -f "${LXD_TWO_DIR}/unix.socket" + rm -f "${LXD_ONE_DIR}/unix.socket" + + teardown_clustering_netns + teardown_clustering_bridge + + kill_lxd "${LXD_ONE_DIR}" + kill_lxd "${LXD_TWO_DIR}" + kill_lxd "${LXD_THREE_DIR}" +} From 9e64a13c14d1ac8850d0e021baccd974e290097a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 12 Jun 2023 14:15:40 +0200 Subject: [PATCH 094/543] doc/cluster: Add comment on how to move instances to cluster groups using the target flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- doc/howto/cluster_manage_instance.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/howto/cluster_manage_instance.md b/doc/howto/cluster_manage_instance.md index 263ca03d0097..c4cb6d7e2386 100644 --- a/doc/howto/cluster_manage_instance.md +++ b/doc/howto/cluster_manage_instance.md @@ -37,3 +37,8 @@ For example, to move the instance `c1` to the cluster member `server1`, use the lxc start c1 See {ref}`move-instances` for more information. + +To move an instance to a member of a cluster group, use the group name prefixed with `@` for the `--target` flag. 
+For example: + + lxc move c1 --target @group1 From 7fc97809bd8d146994237973da0ddadd064f9fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Fri, 23 Jun 2023 15:30:36 +0200 Subject: [PATCH 095/543] lxc/storage/volume/show: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When requesting the configurations of a storage volume, the caller might need to provide its type and/or snapshot. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index 3af7ed86d602..f8d24a5f3c28 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1794,16 +1794,24 @@ type cmdStorageVolumeShow struct { func (c *cmdStorageVolumeShow) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("show", i18n.G("[:] [/]")) + cmd.Use = usage("show", i18n.G("[:] [/][/]")) cmd.Short = i18n.G("Show storage volume configurations") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show storage volume configurations`)) cmd.Example = cli.FormatSection("", i18n.G( - `lxc storage volume show default data + `Provide the type of the storage volume if it is not custom. +Supported types are custom, image, container and virtual-machine. + +Add the name of the snapshot if type is one of custom, container or virtual-machine. + +lxc storage volume show default data Will show the properties of a custom volume called "data" in the "default" pool. lxc storage volume show default container/data - Will show the properties of the filesystem for a container called "data" in the "default" pool.`)) + Will show the properties of the filesystem for a container called "data" in the "default" pool. 
+ +lxc storage volume show default virtual-machine/data/snap0 + Will show the properties of snapshot "snap0" for a virtual machine called "data" in the "default" pool.`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run From af398763fe5e2b79d76058009ce4ace7392b1127 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 26 Jun 2023 10:58:54 +0200 Subject: [PATCH 096/543] lxc/storage/volume/edit: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When editing the configurations of a storage volume, the caller might need to provide its type. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index f8d24a5f3c28..83fae152d105 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -842,12 +842,15 @@ type cmdStorageVolumeEdit struct { func (c *cmdStorageVolumeEdit) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("edit", i18n.G("[:] [/]")) + cmd.Use = usage("edit", i18n.G("[:] [/]")) cmd.Short = i18n.G("Edit storage volume configurations as YAML") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Edit storage volume configurations as YAML`)) cmd.Example = cli.FormatSection("", i18n.G( - `lxc storage volume edit [:] < volume.yaml + `Provide the type of the storage volume if it is not custom. +Supported types are custom, image, container and virtual-machine. 
+ +lxc storage volume edit [:] [/] < volume.yaml Update a storage volume using the content of pool.yaml.`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") From e77976fa2d1d87f266985b60ec1e213a5bb96a7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 26 Jun 2023 11:43:14 +0200 Subject: [PATCH 097/543] lxc/storage/volume/set: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When setting configuration keys of a storage volume, the caller might need to provide its type. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index 83fae152d105..95f506479fda 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1709,13 +1709,22 @@ type cmdStorageVolumeSet struct { func (c *cmdStorageVolumeSet) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("set", i18n.G("[:] =...")) + cmd.Use = usage("set", i18n.G("[:] [/] =...")) cmd.Short = i18n.G("Set storage volume configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Set storage volume configuration keys For backward compatibility, a single configuration key may still be set with: - lxc storage volume set [:] `)) + lxc storage volume set [:] [/] `)) + cmd.Example = cli.FormatSection("", i18n.G( + `Provide the type of the storage volume if it is not custom. +Supported types are custom, image, container and virtual-machine. + +lxc storage volume set default data size=1GiB + Sets the size of a custom volume "data" in pool "default" to 1 GiB. 
+ +lxc storage volume set default virtual-machine/data snapshots.expiry=7d + Sets the snapshot expiration period for a virtual machine "data" in pool "default" to seven days.`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run From 303f52edddb3292168fbe2dc57402670df6b1686 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 26 Jun 2023 11:48:58 +0200 Subject: [PATCH 098/543] lxc/storage/volume/get: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When getting configuration keys of a storage volume, the caller might need to provide its type and/or snapshot. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index 95f506479fda..cc2874a66acc 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1049,10 +1049,21 @@ type cmdStorageVolumeGet struct { func (c *cmdStorageVolumeGet) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("get", i18n.G("[:] [/] ")) + cmd.Use = usage("get", i18n.G("[:] [/][/] ")) cmd.Short = i18n.G("Get values for storage volume configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Get values for storage volume configuration keys`)) + cmd.Example = cli.FormatSection("", i18n.G( + `Provide the type of the storage volume if it is not custom. +Supported types are custom, image, container and virtual-machine. + +Add the name of the snapshot if type is one of custom, container or virtual-machine. + +lxc storage volume get default data size + Returns the size of a custom volume "data" in pool "default". 
+ +lxc storage volume get default virtual-machine/data snapshots.expiry + Returns the snapshot expiration period for a virtual machine "data" in pool "default".`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run From 0de9507a38903c673e55f45c2a9fc722b6958978 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 26 Jun 2023 11:56:50 +0200 Subject: [PATCH 099/543] lxc/storage/volume/info: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When getting state information of a storage volume, the caller might need to provide its type. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index cc2874a66acc..18397d4e3678 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1148,10 +1148,19 @@ type cmdStorageVolumeInfo struct { func (c *cmdStorageVolumeInfo) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("info", i18n.G("[:] ")) + cmd.Use = usage("info", i18n.G("[:] [/]")) cmd.Short = i18n.G("Show storage volume state information") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show storage volume state information`)) + cmd.Example = cli.FormatSection("", i18n.G( + `Provide the type of the storage volume if it is not custom. +Supported types are custom, container and virtual-machine. + +lxc storage volume info default data + Returns state information for a custom volume "data" in pool "default". 
+ +lxc storage volume info default virtual-machine/data + Returns state information for a virtual machine "data" in pool "default".`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run From 8bf769afd60d74118a3e4d67b04f17a7e61dfa41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 26 Jun 2023 12:13:02 +0200 Subject: [PATCH 100/543] lxc/storage/volume/unset: Add missing documentation in help message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When removing configuration keys of a storage volume, the caller might need to provide its type. Signed-off-by: Julian Pelizäus --- lxc/storage_volume.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index 18397d4e3678..cf4b3f5c251d 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1933,10 +1933,19 @@ type cmdStorageVolumeUnset struct { func (c *cmdStorageVolumeUnset) Command() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = usage("unset", i18n.G("[:] ")) + cmd.Use = usage("unset", i18n.G("[:] [/] ")) cmd.Short = i18n.G("Unset storage volume configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Unset storage volume configuration keys`)) + cmd.Example = cli.FormatSection("", i18n.G( + `Provide the type of the storage volume if it is not custom. +Supported types are custom, image, container and virtual-machine. + +lxc storage volume unset default data size + Removes the size/quota of a custom volume "data" in pool "default". 
+ +lxc storage volume unset default virtual-machine/data snapshots.expiry + Removes the snapshot expiration period for a virtual machine "data" in pool "default".`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run From 35805e85a7c717f39168c04ab02cd68a717444e8 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Thu, 22 Jun 2023 17:07:33 -0400 Subject: [PATCH 101/543] lxd/storage/util: use a power of 2 unit for address space limit Signed-off-by: Simon Deziel --- lxd/storage/utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go index cc9bbae1091f..c56541278ec7 100644 --- a/lxd/storage/utils.go +++ b/lxd/storage/utils.go @@ -473,9 +473,9 @@ func ImageUnpack(imageFile string, vol drivers.Volume, destBlockFile string, blo // Get info about qcow2 file. Force input format to qcow2 so we don't rely on qemu-img's detection // logic as that has been known to have vulnerabilities and we only support qcow2 images anyway. // Use prlimit because qemu-img can consume considerable RAM & CPU time if fed a maliciously - // crafted disk image. Since cloud tenants are not to be trusted, ensure QEMU is limits to 1 GB + // crafted disk image. Since cloud tenants are not to be trusted, ensure QEMU is limits to 1 GiB // address space and 2 seconds CPU time, which ought to be more than enough for real world images. 
- cmd := []string{"prlimit", "--cpu=2", "--as=1000000000", "qemu-img", "info", "-f", "qcow2", "--output=json", imgPath} + cmd := []string{"prlimit", "--cpu=2", "--as=1073741824", "qemu-img", "info", "-f", "qcow2", "--output=json", imgPath} imgJSON, err := apparmor.QemuImg(sysOS, cmd, imgPath, dstPath) if err != nil { return -1, fmt.Errorf("Failed reading image info %q: %w", imgPath, err) From 130cfb1ac2e6967fb8da8d2116bd5b21a3f44d11 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Thu, 22 Jun 2023 17:16:42 -0400 Subject: [PATCH 102/543] test/*: use a power of 2 units for memory/storage Signed-off-by: Simon Deziel --- test/suites/container_devices_disk.sh | 2 +- test/suites/projects.sh | 4 ++-- test/suites/storage_driver_cephfs.sh | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh index 1f4f8c0b0518..3cde33a9ae47 100644 --- a/test/suites/container_devices_disk.sh +++ b/test/suites/container_devices_disk.sh @@ -106,7 +106,7 @@ test_container_devices_disk_ceph() { RBD_POOL_NAME=lxdtest-$(basename "${LXD_DIR}")-disk ceph osd pool create "${RBD_POOL_NAME}" 1 - rbd create --pool "${RBD_POOL_NAME}" --size 50M my-volume + rbd create --pool "${RBD_POOL_NAME}" --size 50MiB my-volume RBD_DEVICE=$(rbd map --pool "${RBD_POOL_NAME}" my-volume) mkfs.ext4 -m0 "${RBD_DEVICE}" rbd unmap "${RBD_DEVICE}" diff --git a/test/suites/projects.sh b/test/suites/projects.sh index 1cbea1137a75..0d77a5d935a4 100644 --- a/test/suites/projects.sh +++ b/test/suites/projects.sh @@ -710,13 +710,13 @@ test_projects_limits() { # aggregate project's limit is not possible. ! lxc profile device set default root size=160MiB || false ! lxc config device set c2 root size 110MiB || false - ! lxc storage volume set "${pool}" v1 size 110MiB || false + ! lxc storage volume set "${pool}" v1 size 110MiB # Can't create a custom volume without specifying a size. ! 
lxc storage volume create "${pool}" v2 || false # Disk limits can be updated if they stay within limits. - lxc project set p1 limits.disk 204900KiB + lxc project set p1 limits.disk 200100kB lxc profile device set default root size=90MiB lxc config device set c2 root size 60MiB diff --git a/test/suites/storage_driver_cephfs.sh b/test/suites/storage_driver_cephfs.sh index 233f415e1586..8ef29fc2d03b 100644 --- a/test/suites/storage_driver_cephfs.sh +++ b/test/suites/storage_driver_cephfs.sh @@ -17,7 +17,9 @@ test_storage_driver_cephfs() { # Creation, rename and deletion lxc storage volume create cephfs vol1 - lxc storage volume set cephfs vol1 size 100MiB + if [ "$(uname -r | cut -d. -f1)" -gt 4 ]; then + lxc storage volume set cephfs vol1 size 100MiB + fi lxc storage volume rename cephfs vol1 vol2 lxc storage volume copy cephfs/vol2 cephfs/vol1 lxc storage volume delete cephfs vol1 From 0aaf9048d16c73da643c473e813368b744d9c58d Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 27 Jun 2023 10:47:19 +0200 Subject: [PATCH 103/543] lxc/delete: Allow deleting multiple snapshots Fixes #11893 Signed-off-by: Thomas Hipp --- lxc/delete.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lxc/delete.go b/lxc/delete.go index 6f785ebab116..42f66e0d0b5b 100644 --- a/lxc/delete.go +++ b/lxc/delete.go @@ -100,7 +100,12 @@ func (c *cmdDelete) Run(cmd *cobra.Command, args []string) error { } if shared.IsSnapshot(resource.name) { - return c.doDelete(resource.server, resource.name) + err := c.doDelete(resource.server, resource.name) + if err != nil { + return err + } + + continue } ct, _, err := resource.server.GetInstance(resource.name) From 2e53fa8a161953ec0174fe4dc5b1d9a3f739dcc6 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 27 Jun 2023 11:00:31 +0200 Subject: [PATCH 104/543] test: Test deleting multiple instance snapshots Signed-off-by: Thomas Hipp --- test/suites/snapshots.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/test/suites/snapshots.sh b/test/suites/snapshots.sh index 18cd9920b188..f502770371fe 100644 --- a/test/suites/snapshots.sh +++ b/test/suites/snapshots.sh @@ -54,6 +54,13 @@ snapshots() { [ ! -d "${LXD_DIR}/snapshots/foo/snap0" ] fi + # test deleting multiple snapshots + lxc snapshot foo snap2 + lxc snapshot foo snap3 + lxc delete foo/snap2 foo/snap3 + ! lxc info foo | grep -q snap2 || false + ! lxc info foo | grep -q snap3 || false + # no CLI for this, so we use the API directly (rename a snapshot) wait_for "${LXD_ADDR}" my_curl -X POST "https://${LXD_ADDR}/1.0/containers/foo/snapshots/tester" -d "{\"name\":\"tester2\"}" # FIXME: make this backend agnostic From b48d47636db40169e42d0d58b9ce5c741c1df827 Mon Sep 17 00:00:00 2001 From: Ruth Fuchss Date: Tue, 27 Jun 2023 14:35:45 +0200 Subject: [PATCH 105/543] doc: exclude pages from search index Exclude the two cheat sheets from the search index. Since this only excludes the pages, but not the _objects_ in the page, also exclude the config options cheat sheet from processing. Signed-off-by: Ruth Fuchss --- doc/conf.py | 2 +- doc/config_options_cheat_sheet.md | 6 ++++++ doc/doc-cheat-sheet.md | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index ec566efc49e6..2f5b771410ed 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -144,7 +144,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
-exclude_patterns = ['html', 'README.md', '.sphinx'] +exclude_patterns = ['html', 'README.md', '.sphinx', 'config_options_cheat_sheet.md'] # Open Graph configuration diff --git a/doc/config_options_cheat_sheet.md b/doc/config_options_cheat_sheet.md index b845357114ca..58be1c0e7bb5 100644 --- a/doc/config_options_cheat_sheet.md +++ b/doc/config_options_cheat_sheet.md @@ -1,9 +1,15 @@ --- orphan: true +nosearch: true --- # Configuration options +```{important} +This page shows how to output configuration option documentation. +The content in this page is for demonstration purposes only. +``` + Some instance options: ```{config:option} agent.nic_config instance diff --git a/doc/doc-cheat-sheet.md b/doc/doc-cheat-sheet.md index 88a1e5e55754..4e996584ae20 100644 --- a/doc/doc-cheat-sheet.md +++ b/doc/doc-cheat-sheet.md @@ -1,5 +1,6 @@ --- orphan: true +nosearch: true myst: substitutions: reuse_key: "This is **included** text." From 8252157513ab0156381d4ba2e0e53c049d2f746c Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 30 May 2023 11:09:25 +0200 Subject: [PATCH 106/543] lxd/storage/drivers: Don't add filesystem suffix to VM images Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_zfs_utils.go b/lxd/storage/drivers/driver_zfs_utils.go index f48ff7dd940c..bf8d6edd6916 100644 --- a/lxd/storage/drivers/driver_zfs_utils.go +++ b/lxd/storage/drivers/driver_zfs_utils.go @@ -34,7 +34,7 @@ const ( func (d *zfs) dataset(vol Volume, deleted bool) string { name, snapName, _ := api.GetParentAndSnapshotName(vol.name) - if vol.volType == VolumeTypeImage && d.isBlockBacked(vol) { + if vol.volType == VolumeTypeImage && vol.contentType == ContentTypeFS && d.isBlockBacked(vol) { name = fmt.Sprintf("%s_%s", name, vol.ConfigBlockFilesystem()) } From 1f2523995ad001398a40f4cc0a3c24c09c83fa48 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 30 May 2023 15:27:18 +0200 
Subject: [PATCH 107/543] lxd/storage/drivers: Drop block options from VM block volumes This drops block mode related options from VM block volumes. Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_volumes.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 8019fffad05f..0f5b8fdeefe4 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -3204,11 +3204,10 @@ func (d *zfs) FillVolumeConfig(vol Volume) error { return err } - // Only validate filesystem config keys for filesystem volumes or VM block volumes (which have an - // associated filesystem volume). - if d.isBlockBacked(vol) && vol.ContentType() == ContentTypeFS || vol.IsVMBlock() { + // Only validate filesystem config keys for filesystem volumes. + if d.isBlockBacked(vol) && vol.ContentType() == ContentTypeFS { // Inherit block mode from pool if not set. - if d.isBlockBacked(vol) && vol.ContentType() == ContentTypeFS && vol.config["zfs.block_mode"] == "" { + if vol.config["zfs.block_mode"] == "" { vol.config["zfs.block_mode"] = d.config["volume.zfs.block_mode"] } From 94ee22255b590374600956787f87fb357e8a015f Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 31 May 2023 14:37:50 +0200 Subject: [PATCH 108/543] lxd/patches: Drop filesystem suffix from ZFS image volumes Signed-off-by: Thomas Hipp --- lxd/patches.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/patches.go b/lxd/patches.go index 59123a3d7cff..29d28600009d 100644 --- a/lxd/patches.go +++ b/lxd/patches.go @@ -71,6 +71,7 @@ var patches = []patch{ {name: "dnsmasq_entries_include_device_name", stage: patchPostDaemonStorage, run: patchDnsmasqEntriesIncludeDeviceName}, {name: "storage_missing_snapshot_records", stage: patchPostDaemonStorage, run: patchGenericStorage}, {name: "storage_delete_old_snapshot_records", stage: patchPostDaemonStorage, run: 
patchGenericStorage}, + {name: "storage_zfs_drop_block_volume_filesystem_extension", stage: patchPostDaemonStorage, run: patchGenericStorage}, } type patch struct { From 8ad200c26c55a03f78fc07c4eb9f60231f24ff99 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 27 Jun 2023 14:33:34 +0200 Subject: [PATCH 109/543] lxd/storage/drivers: Drop FS suffix on VM image block volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_btrfs.go | 7 ++-- lxd/storage/drivers/driver_ceph.go | 7 ++-- lxd/storage/drivers/driver_cephfs.go | 7 ++-- lxd/storage/drivers/driver_cephobject.go | 7 ++-- lxd/storage/drivers/driver_dir.go | 7 ++-- lxd/storage/drivers/driver_lvm.go | 7 ++-- lxd/storage/drivers/driver_zfs.go | 43 ++++++++++++++++++++++-- 7 files changed, 64 insertions(+), 21 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go index 26101adf921a..5c8ed0c66b8e 100644 --- a/lxd/storage/drivers/driver_btrfs.go +++ b/lxd/storage/drivers/driver_btrfs.go @@ -32,9 +32,10 @@ type btrfs struct { func (d *btrfs) load() error { // Register the patches. d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } // Done if previously loaded. diff --git a/lxd/storage/drivers/driver_ceph.go b/lxd/storage/drivers/driver_ceph.go index 84b44bd76c5d..ab4c12fec2df 100644 --- a/lxd/storage/drivers/driver_ceph.go +++ b/lxd/storage/drivers/driver_ceph.go @@ -29,9 +29,10 @@ type ceph struct { func (d *ceph) load() error { // Register the patches. 
d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } // Done if previously loaded. diff --git a/lxd/storage/drivers/driver_cephfs.go b/lxd/storage/drivers/driver_cephfs.go index 5fee0abe3dc9..3999a0a05322 100644 --- a/lxd/storage/drivers/driver_cephfs.go +++ b/lxd/storage/drivers/driver_cephfs.go @@ -26,9 +26,10 @@ type cephfs struct { func (d *cephfs) load() error { // Register the patches. d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } // Done if previously loaded. diff --git a/lxd/storage/drivers/driver_cephobject.go b/lxd/storage/drivers/driver_cephobject.go index c34af5a2995d..987b1fc82ec4 100644 --- a/lxd/storage/drivers/driver_cephobject.go +++ b/lxd/storage/drivers/driver_cephobject.go @@ -28,9 +28,10 @@ type cephobject struct { func (d *cephobject) load() error { // Register the patches. d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } // Done if previously loaded. 
diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go index ba0011f2d2e9..4f19a6f6a0f5 100644 --- a/lxd/storage/drivers/driver_dir.go +++ b/lxd/storage/drivers/driver_dir.go @@ -20,9 +20,10 @@ type dir struct { func (d *dir) load() error { // Register the patches. d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } return nil diff --git a/lxd/storage/drivers/driver_lvm.go b/lxd/storage/drivers/driver_lvm.go index 16a2f62cea9c..e00365427137 100644 --- a/lxd/storage/drivers/driver_lvm.go +++ b/lxd/storage/drivers/driver_lvm.go @@ -31,9 +31,10 @@ type lvm struct { func (d *lvm) load() error { // Register the patches. d.patches = map[string]func() error{ - "storage_lvm_skipactivation": d.patchStorageSkipActivation, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": d.patchStorageSkipActivation, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": nil, } // Done if previously loaded. diff --git a/lxd/storage/drivers/driver_zfs.go b/lxd/storage/drivers/driver_zfs.go index 3d83672d7371..ca14061a5575 100644 --- a/lxd/storage/drivers/driver_zfs.go +++ b/lxd/storage/drivers/driver_zfs.go @@ -44,9 +44,10 @@ type zfs struct { func (d *zfs) load() error { // Register the patches. 
d.patches = map[string]func() error{ - "storage_lvm_skipactivation": nil, - "storage_missing_snapshot_records": nil, - "storage_delete_old_snapshot_records": nil, + "storage_lvm_skipactivation": nil, + "storage_missing_snapshot_records": nil, + "storage_delete_old_snapshot_records": nil, + "storage_zfs_drop_block_volume_filesystem_extension": d.patchDropBlockVolumeFilesystemExtension, } // Done if previously loaded. @@ -664,3 +665,39 @@ func (d *zfs) MigrationTypes(contentType ContentType, refresh bool, copySnapshot }, } } + +// patchDropBlockVolumeFilesystemExtension removes the filesystem extension (e.g _ext4) from VM image block volumes. +func (d *zfs) patchDropBlockVolumeFilesystemExtension() error { + poolName, ok := d.config["zfs.pool_name"] + if !ok { + poolName = d.name + } + + out, err := shared.RunCommand("zfs", "list", "-H", "-r", "-o", "name", "-t", "volume", fmt.Sprintf("%s/images", poolName)) + if err != nil { + return fmt.Errorf("Failed listing images: %w", err) + } + + for _, volume := range strings.Split(out, "\n") { + fields := strings.SplitN(volume, "/", 3) + + if len(fields) != 3 { + continue + } + + // Ignore non-block images, and images without filesystem extension + if !strings.HasSuffix(fields[2], ".block") || !strings.Contains(fields[2], "_") { + continue + } + + // Rename zfs dataset. Snapshots will automatically be renamed. 
+ newName := fmt.Sprintf("%s/images/%s.block", poolName, strings.Split(fields[2], "_")[0]) + + _, err = shared.RunCommand("zfs", "rename", volume, newName) + if err != nil { + return fmt.Errorf("Failed renaming zfs dataset: %w", err) + } + } + + return nil +} From 18fbd73b8f49c86d127d3066ea37a3ee5ffae25c Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Tue, 27 Jun 2023 16:55:45 +0100 Subject: [PATCH 110/543] lxd: Fix deviceEventListener resource scheduling when joining cluster By passing deviceEventListener a function that can get a fresh copy of state, rather than using a long held version, which can become out of date. This manifested itself in no container CPU scheduling being run for instances launched on a server that had just joined a cluster. This was because the ServerName property in the state.State object deviceEventListener was passed on LXD start up contained "none" but after the server had joined the cluster its ServerName changed to its cluster member name. This then meant that the instance.LoadNodeAll() call used inside deviceEventListener was filtering instances that were on server "none", of which there weren't any because the new instances were associated with the cluster member's name. This then meant that those instances did not get their CPU scheduling configured until LXD was next reloaded. 
Signed-off-by: Thomas Parrott --- lxd/daemon.go | 2 +- lxd/devices.go | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lxd/daemon.go b/lxd/daemon.go index 05c256fcffeb..4431e54a2969 100644 --- a/lxd/daemon.go +++ b/lxd/daemon.go @@ -1354,7 +1354,7 @@ func (d *Daemon) init() error { if !d.os.MockMode { // Start the scheduler - go deviceEventListener(d.State()) + go deviceEventListener(d.State) prefixPath := os.Getenv("LXD_DEVMONITOR_DIR") if prefixPath == "" { diff --git a/lxd/devices.go b/lxd/devices.go index 1e9a2135354b..cdd03e4b33b4 100644 --- a/lxd/devices.go +++ b/lxd/devices.go @@ -656,7 +656,9 @@ func deviceNetworkPriority(s *state.State, netif string) { } } -func deviceEventListener(s *state.State) { +// deviceEventListener starts the event listener for resource scheduling. +// Accepts stateFunc which will be called each time it needs a fresh state.State. +func deviceEventListener(stateFunc func() *state.State) { chNetlinkCPU, chNetlinkNetwork, chUSB, chUnix, err := deviceNetlinkListener() if err != nil { logger.Errorf("scheduler: Couldn't setup netlink listener: %v", err) @@ -671,6 +673,8 @@ func deviceEventListener(s *state.State) { continue } + s := stateFunc() + if !s.OS.CGInfo.Supports(cgroup.CPUSet, nil) { continue } @@ -683,6 +687,8 @@ func deviceEventListener(s *state.State) { continue } + s := stateFunc() + if !s.OS.CGInfo.Supports(cgroup.NetPrio, nil) { continue } @@ -695,15 +701,17 @@ func deviceEventListener(s *state.State) { } case e := <-chUSB: - device.USBRunHandlers(s, &e) + device.USBRunHandlers(stateFunc(), &e) case e := <-chUnix: - device.UnixHotplugRunHandlers(s, &e) + device.UnixHotplugRunHandlers(stateFunc(), &e) case e := <-cgroup.DeviceSchedRebalance: if len(e) != 3 { logger.Errorf("Scheduler: received an invalid rebalance event") continue } + s := stateFunc() + if !s.OS.CGInfo.Supports(cgroup.CPUSet, nil) { continue } From fd6a8ad11def32972dc2a67f5ffc915252a8116a Mon Sep 17 00:00:00 2001 From: 
Thomas Hipp Date: Wed, 28 Jun 2023 10:55:48 +0200 Subject: [PATCH 111/543] lxd/instance/drivers: Ensure root disk device When updating an instance, it's possible to override the devices in such a way that it lacks a root disk device. This adds a root device check before finalizing the update so that an instance will always have a root disk device. Fixes #11900 Signed-off-by: Thomas Hipp --- lxd/instance/drivers/driver_lxc.go | 5 +++++ lxd/instance/drivers/driver_qemu.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index ed7524f34377..fb7e002afe82 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -4200,6 +4200,11 @@ func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error { if oldErr == nil && newErr == nil && oldRootDev["pool"] != newRootDev["pool"] { return fmt.Errorf("Cannot update root disk device pool name to %q", newRootDev["pool"]) } + + // Ensure the instance has a root disk. + if newErr != nil { + return fmt.Errorf("Invalid root disk device: %w", newErr) + } } // Run through initLXC to catch anything we missed diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index e3e524a289fa..7da85707ae75 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -4908,6 +4908,11 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { if oldErr == nil && newErr == nil && oldRootDev["pool"] != newRootDev["pool"] { return fmt.Errorf("Cannot update root disk device pool name to %q", newRootDev["pool"]) } + + // Ensure the instance has a root disk. + if newErr != nil { + return fmt.Errorf("Invalid root disk device: %w", newErr) + } } // If apparmor changed, re-validate the apparmor profile (even if not running). 
From 1cb16899064b158fe801927e8793f73c3b584eda Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 28 Jun 2023 11:16:42 +0200 Subject: [PATCH 112/543] test: Test assigning empty profile to instance Signed-off-by: Thomas Hipp --- test/suites/basic.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/suites/basic.sh b/test/suites/basic.sh index 3040064c3fc2..1523f834f215 100644 --- a/test/suites/basic.sh +++ b/test/suites/basic.sh @@ -618,4 +618,11 @@ test_basic_usage() { lxc storage volume delete bla vol1 lxc storage volume delete bla vol2 lxc storage delete bla + + # Test assigning an empty profile (with no root disk device) to an instance. + lxc init testimage c1 + lxc profile create foo + ! lxc profile assign c1 foo || false + lxc profile delete foo + lxc delete -f c1 } From ef89c239020b6a5b5e4081c93b531ee2772b6bbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 28 Jun 2023 12:28:15 +0200 Subject: [PATCH 113/543] lxd/instance/drivers/qemu: Use uint32 for vsock Context ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When creating a new HTTPClient from lxd/vsock use an actual uint32 for the Context ID. Signed-off-by: Julian Pelizäus --- lxd-agent/api_1.0.go | 4 ++-- lxd-agent/devlxd.go | 2 +- lxd/instance/drivers/driver_qemu.go | 10 +++++----- lxd/instance/drivers/driver_qemu_templates.go | 2 +- lxd/vsock/vsock.go | 6 +++--- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/lxd-agent/api_1.0.go b/lxd-agent/api_1.0.go index a5e83bc75a9f..ae2230dccfc0 100644 --- a/lxd-agent/api_1.0.go +++ b/lxd-agent/api_1.0.go @@ -98,7 +98,7 @@ func api10Put(d *Daemon, r *http.Request) response.Response { } // Try connecting to LXD server. 
- client, err := getClient(int(d.serverCID), int(d.serverPort), d.serverCertificate) + client, err := getClient(d.serverCID, int(d.serverPort), d.serverCertificate) if err != nil { return response.ErrorResponse(http.StatusInternalServerError, err.Error()) } @@ -171,7 +171,7 @@ func stopDevlxdServer(d *Daemon) error { return servers["devlxd"].Close() } -func getClient(CID int, port int, serverCertificate string) (*http.Client, error) { +func getClient(CID uint32, port int, serverCertificate string) (*http.Client, error) { agentCert, err := os.ReadFile("agent.crt") if err != nil { return nil, err diff --git a/lxd-agent/devlxd.go b/lxd-agent/devlxd.go index f12b13b2095a..f088c8eb2af1 100644 --- a/lxd-agent/devlxd.go +++ b/lxd-agent/devlxd.go @@ -43,7 +43,7 @@ type devLxdHandler struct { func getVsockClient(d *Daemon) (lxd.InstanceServer, error) { // Try connecting to LXD server. - client, err := getClient(int(d.serverCID), int(d.serverPort), d.serverCertificate) + client, err := getClient(d.serverCID, int(d.serverPort), d.serverCertificate) if err != nil { return nil, err } diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 7da85707ae75..f1efe4fee06a 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -380,9 +380,9 @@ func (d *qemu) getAgentClient() (*http.Client, error) { // This allows a running VM to be recovered after DB record deletion and that agent connection still work // after the VM's instance ID has changed. 
if d.localConfig["volatile.vsock_id"] != "" { - volatileVsockID, err := strconv.Atoi(d.localConfig["volatile.vsock_id"]) + volatileVsockID, err := strconv.ParseUint(d.localConfig["volatile.vsock_id"], 10, 32) if err == nil { - vsockID = volatileVsockID + vsockID = uint32(volatileVsockID) } } @@ -1146,7 +1146,7 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { // Update vsock ID in volatile if needed for recovery (do this before UpdateBackupFile() call). oldVsockID := d.localConfig["volatile.vsock_id"] - newVsockID := strconv.Itoa(d.vsockID()) + newVsockID := strconv.FormatUint(uint64(d.vsockID()), 10) if oldVsockID != newVsockID { volatileSet["volatile.vsock_id"] = newVsockID } @@ -7258,7 +7258,7 @@ func (d *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error { } // vsockID returns the vsock Context ID for the VM. -func (d *qemu) vsockID() int { +func (d *qemu) vsockID() uint32 { // We use the system's own VsockID as the base. // // This is either "2" for a physical system or the VM's own id if @@ -7272,7 +7272,7 @@ func (d *qemu) vsockID() int { // unique, non-clashing context ID for our guest. info := DriverStatuses()[instancetype.VM].Info - return info.Features["vhost_vsock"].(int) + 1 + d.id + return uint32(info.Features["vhost_vsock"].(int) + 1 + d.id) } // InitPID returns the instance's current process ID. 
diff --git a/lxd/instance/drivers/driver_qemu_templates.go b/lxd/instance/drivers/driver_qemu_templates.go index 30fc62e1fbb1..1b64bf414d80 100644 --- a/lxd/instance/drivers/driver_qemu_templates.go +++ b/lxd/instance/drivers/driver_qemu_templates.go @@ -299,7 +299,7 @@ func qemuRNG(opts *qemuDevOpts) []cfgSection { type qemuVsockOpts struct { dev qemuDevOpts - vsockID int + vsockID uint32 } func qemuVsock(opts *qemuVsockOpts) []cfgSection { diff --git a/lxd/vsock/vsock.go b/lxd/vsock/vsock.go index a88604ab77e9..02b782141f9a 100644 --- a/lxd/vsock/vsock.go +++ b/lxd/vsock/vsock.go @@ -23,7 +23,7 @@ func Dial(cid, port uint32) (net.Conn, error) { } // HTTPClient provides an HTTP client for using over vsock. -func HTTPClient(vsockID int, port int, tlsClientCert string, tlsClientKey string, tlsServerCert string) (*http.Client, error) { +func HTTPClient(vsockID uint32, port int, tlsClientCert string, tlsClientKey string, tlsServerCert string) (*http.Client, error) { client := &http.Client{} // Get the TLS configuration. @@ -41,7 +41,7 @@ func HTTPClient(vsockID int, port int, tlsClientCert string, tlsClientKey string // Retry for up to 1s at 100ms interval to handle various failures. for i := 0; i < 10; i++ { - conn, err = Dial(uint32(vsockID), uint32(port)) + conn, err = Dial(vsockID, uint32(port)) if err == nil { break } else { @@ -49,7 +49,7 @@ func HTTPClient(vsockID int, port int, tlsClientCert string, tlsClientKey string msg := err.Error() if strings.Contains(msg, "connection timed out") { // Retry once. 
- conn, err = Dial(uint32(vsockID), uint32(port)) + conn, err = Dial(vsockID, uint32(port)) break } else if strings.Contains(msg, "connection refused") { break From 5c323181b170ef415148fcd205d892264983ec7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 28 Jun 2023 13:11:42 +0200 Subject: [PATCH 114/543] lxd/instance/drivers/qemu: Add function to retrieve the vsock Context ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a client retrieving the already existing vsock Context ID for a VM is now handled by a call to getVsockID() Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 37 ++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index f1efe4fee06a..9bbb8e887770 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -374,16 +374,10 @@ func (d *qemu) getAgentClient() (*http.Client, error) { return nil, err } - vsockID := d.vsockID() // Default to using the vsock ID that will be used on next start. - - // But if vsock ID from last VM start is present in volatile, then use that. - // This allows a running VM to be recovered after DB record deletion and that agent connection still work - // after the VM's instance ID has changed. - if d.localConfig["volatile.vsock_id"] != "" { - volatileVsockID, err := strconv.ParseUint(d.localConfig["volatile.vsock_id"], 10, 32) - if err == nil { - vsockID = uint32(volatileVsockID) - } + // Existing vsock ID from volatile. + vsockID, err := d.getVsockID() + if err != nil { + return nil, err } agent, err := lxdvsock.HTTPClient(vsockID, shared.HTTPSDefaultPort, clientCert, clientKey, agentCert) @@ -2849,6 +2843,12 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo cfg = append(cfg, qemuTablet(&tabletOpts)...) + // Existing vsock ID from volatile. 
+ vsockID, err := d.getVsockID() + if err != nil { + return "", nil, err + } + devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric) vsockOpts := qemuVsockOpts{ dev: qemuDevOpts{ @@ -2857,7 +2857,7 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo devAddr: devAddr, multifunction: multi, }, - vsockID: d.vsockID(), + vsockID: vsockID, } cfg = append(cfg, qemuVsock(&vsockOpts)...) @@ -7257,6 +7257,21 @@ func (d *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error { return nil } +// getVsockID returns the vsock Context ID for the VM. +func (d *qemu) getVsockID() (uint32, error) { + existingVsockID, ok := d.localConfig["volatile.vsock_id"] + if ok { + vsockID, err := strconv.ParseUint(existingVsockID, 10, 32) + if err != nil { + return 0, fmt.Errorf("Failed to parse volatile.vsock_id: %q: %w", existingVsockID, err) + } + + return uint32(vsockID), nil + } + + return 0, fmt.Errorf("Context ID not set in volatile.vsock_id") +} + // vsockID returns the vsock Context ID for the VM. func (d *qemu) vsockID() uint32 { // We use the system's own VsockID as the base. From 14e4f2fc1444cfc0a5023f8c3de1a1e15eb293ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 28 Jun 2023 13:17:47 +0200 Subject: [PATCH 115/543] lxd/instance/drivers/qemu: Pick a random vsock Context ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When acquiring a new Context ID for the communication via vsock, use the UUID of the instance as a seed for generating random uint32 candidates. The loop is kept open until a free Context ID is found or the timeout of 5s is reached. The syscall to the vsock returns ENODEV in case the Context ID is not yet assigned. In case the Context ID of a stopped VM was already acquired again, a new one gets picked. Removes the `vhost_vsock` feature since the value isn't anymore accessed. 
Fixes lxc#11508 Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 105 +++++++++++++++++++++------- 1 file changed, 81 insertions(+), 24 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 9bbb8e887770..62f1c096cb70 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -1138,9 +1138,15 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { volatileSet := make(map[string]string) + // New or existing vsock ID from volatile. + vsockID, err := d.nextVsockID() + if err != nil { + return err + } + // Update vsock ID in volatile if needed for recovery (do this before UpdateBackupFile() call). oldVsockID := d.localConfig["volatile.vsock_id"] - newVsockID := strconv.FormatUint(uint64(d.vsockID()), 10) + newVsockID := strconv.FormatUint(uint64(vsockID), 10) if oldVsockID != newVsockID { volatileSet["volatile.vsock_id"] = newVsockID } @@ -7257,6 +7263,12 @@ func (d *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error { return nil } +// reservedVsockID returns true if the given vsockID equals 0, 1 or 2. +// Those are reserved and we cannot use them. +func (d *qemu) reservedVsockID(vsockID uint32) bool { + return vsockID <= 2 +} + // getVsockID returns the vsock Context ID for the VM. func (d *qemu) getVsockID() (uint32, error) { existingVsockID, ok := d.localConfig["volatile.vsock_id"] @@ -7266,28 +7278,81 @@ func (d *qemu) getVsockID() (uint32, error) { return 0, fmt.Errorf("Failed to parse volatile.vsock_id: %q: %w", existingVsockID, err) } + if d.reservedVsockID(uint32(vsockID)) { + return 0, fmt.Errorf("Failed to use reserved vsock Context ID: %q", vsockID) + } + return uint32(vsockID), nil } return 0, fmt.Errorf("Context ID not set in volatile.vsock_id") } -// vsockID returns the vsock Context ID for the VM. -func (d *qemu) vsockID() uint32 { - // We use the system's own VsockID as the base. 
- // - // This is either "2" for a physical system or the VM's own id if - // running inside of a VM. - // - // To this we add 1 for backward compatibility with prior logic - // which would start at id 3 rather than id 2. Removing that offset - // would cause conflicts between existing VMs until they're all rebooted. - // - // We then add the VM's own instance id (1 or higher) to give us a - // unique, non-clashing context ID for our guest. +// freeVsockID returns true if the given vsockID is not yet acquired. +func (d *qemu) freeVsockID(vsockID uint32) bool { + c, err := lxdvsock.Dial(vsockID, shared.HTTPSDefaultPort) + if err != nil { + var unixErrno unix.Errno - info := DriverStatuses()[instancetype.VM].Info - return uint32(info.Features["vhost_vsock"].(int) + 1 + d.id) + if !errors.As(err, &unixErrno) { + return false + } + + if unixErrno == unix.ENODEV { + // The syscall to the vsock device returned "no such device". + // This means the address (Context ID) is free. + return true + } + } + + // Address is already in use. + c.Close() + return false +} + +// nextVsockID returns the next free vsock Context ID for the VM. +// It tries to acquire one randomly until the timeout exceeds. +func (d *qemu) nextVsockID() (uint32, error) { + // Check if vsock ID from last VM start is present in volatile, then use that. + // This allows a running VM to be recovered after DB record deletion and that an agent connection still works + // after the VM's instance ID has changed. + // Continue in case of error since the caller requires a valid vsockID in any case. + vsockID, err := d.getVsockID() + if err == nil { + // Check if the vsock ID from last VM start is still not acquired in case the VM was stopped. 
+ if d.freeVsockID(vsockID) { + return vsockID, nil + } + } + + instanceUUID := uuid.Parse(d.localConfig["volatile.uuid"]) + if instanceUUID == nil { + return 0, fmt.Errorf("Failed to parse instance UUID from volatile.uuid") + } + + r, err := util.GetStableRandomGenerator(instanceUUID.String()) + if err != nil { + return 0, fmt.Errorf("Failed generating stable random seed from instance UUID %q: %w", instanceUUID, err) + } + + timeout := 5 * time.Second + + // Try to find a new Context ID. + for start := time.Now(); time.Since(start) <= timeout; { + candidateVsockID := r.Uint32() + + if d.reservedVsockID(candidateVsockID) { + continue + } + + if d.freeVsockID(candidateVsockID) { + return candidateVsockID, nil + } + + continue + } + + return 0, fmt.Errorf("Timeout exceeded whilst trying to acquire the next vsock Context ID") } // InitPID returns the instance's current process ID. @@ -7798,14 +7863,6 @@ func (d *qemu) checkFeatures(hostArch int, qemuPath string) (map[string]any, err features["vhost_net"] = struct{}{} } - vsockID, err := vsock.ContextID() - if err != nil || vsockID > 2147483647 { - // Fallback to the default ID for a host system - features["vhost_vsock"] = vsock.Host - } else { - features["vhost_vsock"] = int(vsockID) - } - return features, nil } From 9c3ed45f8e71b8a08f103c579193657cbfaf00da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 12:29:11 +0200 Subject: [PATCH 116/543] lxd/storage/drivers/btrfs: Add reverter for volume snapshotting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes a condition were subsequent snapshot attempts fail due to the root subvolume being already there Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/driver_btrfs_utils.go | 22 ++++++++--- lxd/storage/drivers/driver_btrfs_volumes.go | 42 ++++++++++++++------- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go 
b/lxd/storage/drivers/driver_btrfs_utils.go index 6b7447d28772..0c798ed63cdb 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -18,6 +18,7 @@ import ( "gopkg.in/yaml.v2" "github.com/canonical/lxd/lxd/backup" + "github.com/canonical/lxd/lxd/revert" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/ioprogress" @@ -125,7 +126,10 @@ func (d *btrfs) getSubvolumes(path string) ([]string, error) { // snapshotSubvolume creates a snapshot of the specified path at the dest supplied. If recursion is true and // sub volumes are found below the path then they are created at the relative location in dest. -func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) error { +func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) (revert.Hook, error) { + revert := revert.New() + defer revert.Fail() + // Single subvolume deletion. snapshot := func(path string, dest string) error { _, err := shared.RunCommand("btrfs", "subvolume", "snapshot", path, dest) @@ -133,13 +137,19 @@ func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) erro return err } + revert.Add(func() { + // Don't delete recursive since there already is a revert hook + // for each subvolume that got created. + _ = d.deleteSubvolume(dest, false) + }) + return nil } // First snapshot the root. err := snapshot(path, dest) if err != nil { - return err + return nil, err } // Now snapshot all subvolumes of the root. @@ -147,7 +157,7 @@ func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) erro // Get the subvolumes list. 
subSubVols, err := d.getSubvolumes(path) if err != nil { - return err + return nil, err } sort.Strings(subSubVols) @@ -160,12 +170,14 @@ func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) erro err := snapshot(filepath.Join(path, subSubVol), subSubVolSnapPath) if err != nil { - return err + return nil, err } } } - return nil + cleanup := revert.Clone().Fail + revert.Success() + return cleanup, nil } func (d *btrfs) deleteSubvolume(rootPath string, recursion bool) error { diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go index 6ad1172a53cc..e091d95b5f89 100644 --- a/lxd/storage/drivers/driver_btrfs_volumes.go +++ b/lxd/storage/drivers/driver_btrfs_volumes.go @@ -405,12 +405,14 @@ func (d *btrfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bo target := vol.MountPath() // Recursively copy the main volume. - err = d.snapshotSubvolume(srcVol.MountPath(), target, true) + cleanup, err := d.snapshotSubvolume(srcVol.MountPath(), target, true) if err != nil { return err } - revert.Add(func() { _ = d.deleteSubvolume(target, true) }) + if cleanup != nil { + revert.Add(cleanup) + } // Restore readonly property on subvolumes in reverse order (except root which should be left writable). 
subVolCount := len(subVols) @@ -463,11 +465,15 @@ func (d *btrfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bo srcSnapshot := GetVolumeMountPath(d.name, srcVol.volType, GetSnapshotVolumeName(srcVol.name, snapName)) dstSnapshot := GetVolumeMountPath(d.name, vol.volType, GetSnapshotVolumeName(vol.name, snapName)) - err = d.snapshotSubvolume(srcSnapshot, dstSnapshot, true) + cleanup, err := d.snapshotSubvolume(srcSnapshot, dstSnapshot, true) if err != nil { return err } + if cleanup != nil { + revert.Add(cleanup) + } + err = d.setSubvolumeReadonlyProperty(dstSnapshot, true) if err != nil { return err @@ -1182,14 +1188,14 @@ func (d *btrfs) readonlySnapshot(vol Volume) (string, revert.Hook, error) { mountPath := filepath.Join(tmpDir, vol.name) - err = d.snapshotSubvolume(sourcePath, mountPath, true) + cleanup, err := d.snapshotSubvolume(sourcePath, mountPath, true) if err != nil { return "", nil, err } - revert.Add(func() { - _ = d.deleteSubvolume(mountPath, true) - }) + if cleanup != nil { + revert.Add(cleanup) + } err = d.setSubvolumeReadonlyProperty(mountPath, true) if err != nil { @@ -1198,7 +1204,7 @@ func (d *btrfs) readonlySnapshot(vol Volume) (string, revert.Hook, error) { d.logger.Debug("Created read-only backup snapshot", logger.Ctx{"sourcePath": sourcePath, "path": mountPath}) - cleanup := revert.Clone().Fail + cleanup = revert.Clone().Fail revert.Success() return mountPath, cleanup, nil } @@ -1440,7 +1446,7 @@ func (d *btrfs) migrateVolumeOptimized(vol Volume, conn io.ReadWriteCloser, volS // Make recursive read-only snapshot of the subvolume as writable subvolumes cannot be sent. 
migrationSendSnapshotPrefix := filepath.Join(tmpVolumesMountPoint, ".migration-send") - err = d.snapshotSubvolume(vol.MountPath(), migrationSendSnapshotPrefix, true) + _, err = d.snapshotSubvolume(vol.MountPath(), migrationSendSnapshotPrefix, true) if err != nil { return err } @@ -1669,7 +1675,7 @@ func (d *btrfs) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWr // Create the read-only snapshot. targetVolume := fmt.Sprintf("%s/.backup", tmpInstanceMntPoint) - err = d.snapshotSubvolume(sourceVolume, targetVolume, true) + _, err = d.snapshotSubvolume(sourceVolume, targetVolume, true) if err != nil { return err } @@ -1719,11 +1725,18 @@ func (d *btrfs) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) e return err } - err = d.snapshotSubvolume(srcPath, snapPath, true) + revert := revert.New() + defer revert.Fail() + + cleanup, err := d.snapshotSubvolume(srcPath, snapPath, true) if err != nil { return err } + if cleanup != nil { + revert.Add(cleanup) + } + err = d.setSubvolumeReadonlyProperty(snapPath, true) if err != nil { return err @@ -1745,6 +1758,7 @@ func (d *btrfs) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) e } } + revert.Success() return nil } @@ -1876,12 +1890,14 @@ func (d *btrfs) RestoreVolume(vol Volume, snapshotName string, op *operations.Op revert.Add(func() { _ = os.Rename(backupSubvolume, target) }) // Restore the snapshot. - err = d.snapshotSubvolume(srcVol.MountPath(), target, true) + cleanup, err := d.snapshotSubvolume(srcVol.MountPath(), target, true) if err != nil { return err } - revert.Add(func() { _ = d.deleteSubvolume(target, true) }) + if cleanup != nil { + revert.Add(cleanup) + } // Restore readonly property on subvolumes in reverse order (except root which should be left writable). 
subVolCount := len(subVols) From 8a2d52be1fc73d70d4e3adc8ce68da4371229203 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 14:42:29 +0200 Subject: [PATCH 117/543] lxd/storage/drivers/btrfs: Change misleading comment in snapshot creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/driver_btrfs_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go index 0c798ed63cdb..1b81db1524b9 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -130,7 +130,7 @@ func (d *btrfs) snapshotSubvolume(path string, dest string, recursion bool) (rev revert := revert.New() defer revert.Fail() - // Single subvolume deletion. + // Single subvolume creation. snapshot := func(path string, dest string) error { _, err := shared.RunCommand("btrfs", "subvolume", "snapshot", path, dest) if err != nil { From 96e1d13496eb4720b09170e3c0c7b24dd7289eff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 15:12:16 +0200 Subject: [PATCH 118/543] lxd/storage/drivers/btrfs: Expect volatile files when performing snapshots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For doing a snapshot, subvolumes of the root get discovered by walking the roots directory. However this causes an issue if filepath.Walk probes volatile files from the actual filesystem. Since lstat is called on all the files filepath.Walk has loaded into memory beforehand, it returns an error for already deleted files. filepath.WalkDir does not call lstat on the discovered files and directories. See the docstring of filepath.WalkDir for more information. 
Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/driver_btrfs_utils.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go index 1b81db1524b9..98a96c8a66b2 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "io" + "io/fs" "os" "os/exec" "path/filepath" @@ -95,7 +96,7 @@ func (d *btrfs) getSubvolumes(path string) ([]string, error) { } // Walk through the entire tree looking for subvolumes. - err := filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error { + err := filepath.WalkDir(path, func(fpath string, entry fs.DirEntry, err error) error { if err != nil { return err } @@ -106,7 +107,7 @@ func (d *btrfs) getSubvolumes(path string) ([]string, error) { } // Subvolumes can only be directories. - if !fi.IsDir() { + if !entry.IsDir() { return nil } From 537f59af2baebd5e946a953377e1d91091d564a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 17:37:07 +0200 Subject: [PATCH 119/543] lxd/storage/drivers/btrfs: Mark btrfsIsSubVolume as deprecated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/utils.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go index 45db34a621cb..d85c018afc13 100644 --- a/lxd/storage/drivers/utils.go +++ b/lxd/storage/drivers/utils.go @@ -705,6 +705,7 @@ func BTRFSSubVolumesGet(path string) ([]string, error) { return result, nil } +// Deprecated: Use IsSubvolume from the Btrfs driver instead. // btrfsIsSubvolume checks if a given path is a subvolume. 
func btrfsIsSubVolume(subvolPath string) bool { fs := unix.Stat_t{} From b934da7d00670310e0ae82c23843d19bbee3a134 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 17:59:09 +0200 Subject: [PATCH 120/543] lxd/storage/drivers/btrfs: Expect volatile files when performing snapshots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/utils.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go index d85c018afc13..e9cf58fdf4d7 100644 --- a/lxd/storage/drivers/utils.go +++ b/lxd/storage/drivers/utils.go @@ -3,6 +3,7 @@ package drivers import ( "fmt" "io" + "io/fs" "os" "os/exec" "path/filepath" @@ -677,24 +678,24 @@ func BTRFSSubVolumesGet(path string) ([]string, error) { path = path + "/" } - // Unprivileged users can't get to fs internals - _ = filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error { + // Unprivileged users can't get to fs internals. + _ = filepath.WalkDir(path, func(fpath string, entry fs.DirEntry, err error) error { // Skip walk errors if err != nil { return nil } - // Ignore the base path + // Ignore the base path. if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") { return nil } - // Subvolumes can only be directories - if !fi.IsDir() { + // Subvolumes can only be directories. + if !entry.IsDir() { return nil } - // Check if a btrfs subvolume + // Check if a btrfs subvolume. 
if btrfsIsSubVolume(fpath) { result = append(result, strings.TrimPrefix(fpath, path)) } From b34ff1d48f651fb0332d3d3e531c69d150dffb25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Thu, 29 Jun 2023 18:03:54 +0200 Subject: [PATCH 121/543] lxd/storage/drivers/btrfs: Use IsSubvolume consistently MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/driver_btrfs_volumes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go index e091d95b5f89..f1d680b18987 100644 --- a/lxd/storage/drivers/driver_btrfs_volumes.go +++ b/lxd/storage/drivers/driver_btrfs_volumes.go @@ -1344,7 +1344,7 @@ func (d *btrfs) migrateVolumeOptimized(vol Volume, conn io.ReadWriteCloser, volS // Detect if parent subvolume exists, and if so use it for differential. parentPath := "" - if parentPrefix != "" && btrfsIsSubVolume(filepath.Join(parentPrefix, subVolume.Path)) { + if parentPrefix != "" && d.isSubvolume(filepath.Join(parentPrefix, subVolume.Path)) { parentPath = filepath.Join(parentPrefix, subVolume.Path) // Set parent subvolume readonly if needed so we can send the subvolume. @@ -1579,7 +1579,7 @@ func (d *btrfs) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWr // Detect if parent subvolume exists, and if so use it for differential. parentPath := "" - if parentPrefix != "" && btrfsIsSubVolume(filepath.Join(parentPrefix, subVolume.Path)) { + if parentPrefix != "" && d.isSubvolume(filepath.Join(parentPrefix, subVolume.Path)) { parentPath = filepath.Join(parentPrefix, subVolume.Path) // Set parent subvolume readonly if needed so we can add the subvolume. 
From 57526401149603bb1b1d620c379c1a968fa2d52e Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 15 Jun 2023 12:19:42 +0200 Subject: [PATCH 122/543] lxd/db: Add new content type "iso" Signed-off-by: Thomas Hipp --- lxd/db/storage_volumes.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go index 9b0857b935fc..74adeff50b73 100644 --- a/lxd/db/storage_volumes.go +++ b/lxd/db/storage_volumes.go @@ -591,12 +591,14 @@ var StoragePoolVolumeTypeNames = map[int]string{ const ( StoragePoolVolumeContentTypeFS = iota StoragePoolVolumeContentTypeBlock + StoragePoolVolumeContentTypeISO ) // Content type names. const ( StoragePoolVolumeContentTypeNameFS string = "filesystem" StoragePoolVolumeContentTypeNameBlock string = "block" + StoragePoolVolumeContentTypeNameISO string = "iso" ) // StorageVolumeArgs is a value object holding all db-related details about a @@ -876,6 +878,8 @@ func storagePoolVolumeContentTypeToName(contentType int) (string, error) { return StoragePoolVolumeContentTypeNameFS, nil case StoragePoolVolumeContentTypeBlock: return StoragePoolVolumeContentTypeNameBlock, nil + case StoragePoolVolumeContentTypeISO: + return StoragePoolVolumeContentTypeNameISO, nil } return "", fmt.Errorf("Invalid storage volume content type") From 1c2156adb0fc31d1f63f87f4dff44ed9aa37e1bf Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 11:37:10 +0200 Subject: [PATCH 123/543] lxd/storage: Add ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/volume.go | 3 +++ lxd/storage/utils.go | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/lxd/storage/drivers/volume.go b/lxd/storage/drivers/volume.go index 2f6d0e461357..6115f12ec991 100644 --- a/lxd/storage/drivers/volume.go +++ b/lxd/storage/drivers/volume.go @@ -68,6 +68,9 @@ const ContentTypeFS = ContentType("filesystem") // know which filesystem(s) (if any) are in use. 
const ContentTypeBlock = ContentType("block") +// ContentTypeISO indicates the volume will be an ISO which is read-only, and uses the ISO 9660 filesystem. +const ContentTypeISO = ContentType("iso") + // VolumePostHook function returned from a storage action that should be run later to complete the action. type VolumePostHook func(vol Volume) error diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go index c56541278ec7..b98be199da5e 100644 --- a/lxd/storage/utils.go +++ b/lxd/storage/utils.go @@ -149,6 +149,8 @@ func VolumeContentTypeToDBContentType(contentType drivers.ContentType) (int, err return db.StoragePoolVolumeContentTypeBlock, nil case drivers.ContentTypeFS: return db.StoragePoolVolumeContentTypeFS, nil + case drivers.ContentTypeISO: + return db.StoragePoolVolumeContentTypeISO, nil } return -1, fmt.Errorf("Invalid volume content type") @@ -161,6 +163,8 @@ func VolumeDBContentTypeToContentType(volDBType int) (drivers.ContentType, error return drivers.ContentTypeBlock, nil case db.StoragePoolVolumeContentTypeFS: return drivers.ContentTypeFS, nil + case db.StoragePoolVolumeContentTypeISO: + return drivers.ContentTypeISO, nil } return "", fmt.Errorf("Invalid volume content type") @@ -173,6 +177,8 @@ func VolumeContentTypeNameToContentType(contentTypeName string) (int, error) { return db.StoragePoolVolumeContentTypeFS, nil case db.StoragePoolVolumeContentTypeNameBlock: return db.StoragePoolVolumeContentTypeBlock, nil + case db.StoragePoolVolumeContentTypeNameISO: + return db.StoragePoolVolumeContentTypeISO, nil } return -1, fmt.Errorf("Invalid volume content type name") From bf75c2c66de3d75afbe578bfe3ef18f4baffb04e Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 29 Jun 2023 13:52:28 +0200 Subject: [PATCH 124/543] lxd/storage/drivers: Add IsContentBlock() This adds a new function which returns true if the content type is either block or iso. 
Signed-off-by: Thomas Hipp --- lxd/storage/drivers/utils.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go index e9cf58fdf4d7..990a0a4d2d07 100644 --- a/lxd/storage/drivers/utils.go +++ b/lxd/storage/drivers/utils.go @@ -871,3 +871,8 @@ func wipeBlockHeaders(path string) error { return nil } + +// IsContentBlock returns true if the content type is either block or iso. +func IsContentBlock(contentType ContentType) bool { + return contentType == ContentTypeBlock || contentType == ContentTypeISO +} From 6524a777fed80a4b13a11e625d46eec28d97f7d4 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 12:09:30 +0200 Subject: [PATCH 125/543] lxd/storage/drivers: Handle volume disk path of ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/generic_vfs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/drivers/generic_vfs.go b/lxd/storage/drivers/generic_vfs.go index 9f8d3a1885fb..850c627ec355 100644 --- a/lxd/storage/drivers/generic_vfs.go +++ b/lxd/storage/drivers/generic_vfs.go @@ -460,7 +460,7 @@ func genericVFSHasVolume(vol Volume) (bool, error) { // genericVFSGetVolumeDiskPath is a generic GetVolumeDiskPath implementation for VFS-only drivers. 
func genericVFSGetVolumeDiskPath(vol Volume) (string, error) { - if vol.contentType != ContentTypeBlock { + if !IsContentBlock(vol.contentType) { return "", ErrNotSupported } From b420fabf45772d1527deb79e7f82343de12fe597 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 12:05:49 +0200 Subject: [PATCH 126/543] lxd/storage: Add CreateCustomVolumeFromISO Signed-off-by: Thomas Hipp --- lxd/storage/backend_lxd.go | 88 +++++++++++++++++++++++++++++++++++ lxd/storage/backend_mock.go | 4 ++ lxd/storage/pool_interface.go | 1 + 3 files changed, 93 insertions(+) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 1bb6d2e7dc67..e259258c79d6 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -1573,6 +1573,22 @@ func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) f } } +// isoFiller returns a function that can be used as a filler function with CreateVolume(). +// The function returned will copy the ISO content into the specified mount path +// provided. +func (b *lxdBackend) isoFiller(data io.Reader) func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) { + return func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) { + f, err := os.OpenFile(rootBlockPath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return -1, err + } + + defer func() { _ = f.Close() }() + + return io.Copy(f, data) + } +} + // CreateInstanceFromImage creates a new volume for an instance populated with the image requested. // On failure caller is expected to call DeleteInstance() to clean up. 
func (b *lxdBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error { @@ -5524,6 +5540,78 @@ func (b *lxdBackend) BackupCustomVolume(projectName string, volName string, tarW return nil } +func (b *lxdBackend) CreateCustomVolumeFromISO(projectName string, volName string, srcData io.ReadSeeker, size int64, op *operations.Operation) error { + l := b.logger.AddContext(logger.Ctx{"project": projectName, "volume": volName}) + l.Debug("CreateCustomVolumeFromISO started") + defer l.Debug("CreateCustomVolumeFromISO finished") + + // Check whether we are allowed to create volumes. + req := api.StorageVolumesPost{ + Name: volName, + } + + err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { + return project.AllowVolumeCreation(tx, projectName, req) + }) + if err != nil { + return fmt.Errorf("Failed checking volume creation allowed: %w", err) + } + + revert := revert.New() + defer revert.Fail() + + // Get the volume name on storage. + volStorageName := project.StorageVolume(projectName, volName) + + config := map[string]string{ + "size": fmt.Sprintf("%d", size), + } + + vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentTypeISO, volStorageName, config) + + volExists, err := b.driver.HasVolume(vol) + if err != nil { + return err + } + + if volExists { + return fmt.Errorf("Cannot create volume, already exists on target storage") + } + + // Validate config and create database entry for new storage volume. 
+ err = VolumeDBCreate(b, projectName, volName, "", vol.Type(), false, vol.Config(), time.Now(), time.Time{}, vol.ContentType(), true, true) + if err != nil { + return fmt.Errorf("Failed creating database entry for custom volume: %w", err) + } + + revert.Add(func() { _ = VolumeDBDelete(b, projectName, volName, vol.Type()) }) + + _, err = srcData.Seek(0, io.SeekStart) + if err != nil { + return err + } + + volFiller := drivers.VolumeFiller{ + Fill: b.isoFiller(srcData), + } + + // Unpack the ISO into the new storage volume(s). + err = b.driver.CreateVolume(vol, &volFiller, op) + if err != nil { + return fmt.Errorf("Failed creating volume: %w", err) + } + + eventCtx := logger.Ctx{"type": vol.Type()} + if !b.Driver().Info().Remote { + eventCtx["location"] = b.state.ServerName + } + + b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, eventCtx)) + + revert.Success() + return nil +} + func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error { l := b.logger.AddContext(logger.Ctx{"project": srcBackup.Project, "volume": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": *srcBackup.OptimizedStorage}) l.Debug("CreateCustomVolumeFromBackup started") diff --git a/lxd/storage/backend_mock.go b/lxd/storage/backend_mock.go index d83a47eddf46..10db499b7093 100644 --- a/lxd/storage/backend_mock.go +++ b/lxd/storage/backend_mock.go @@ -313,3 +313,7 @@ func (b *mockBackend) BackupCustomVolume(projectName string, volName string, tar func (b *mockBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error { return nil } + +func (b *mockBackend) CreateCustomVolumeFromISO(projectName string, volName string, srcData io.ReadSeeker, size int64, op *operations.Operation) error { + return nil +} diff --git a/lxd/storage/pool_interface.go b/lxd/storage/pool_interface.go index 
614bf172cf67..e8fcf575a373 100644 --- a/lxd/storage/pool_interface.go +++ b/lxd/storage/pool_interface.go @@ -105,6 +105,7 @@ type Pool interface { ImportCustomVolume(projectName string, poolVol *backupConfig.Config, op *operations.Operation) (revert.Hook, error) RefreshCustomVolume(projectName string, srcProjectName string, volName, desc string, config map[string]string, srcPoolName, srcVolName string, snapshots bool, op *operations.Operation) error GenerateCustomVolumeBackupConfig(projectName string, volName string, snapshots bool, op *operations.Operation) (*backupConfig.Config, error) + CreateCustomVolumeFromISO(projectName string, volName string, srcData io.ReadSeeker, size int64, op *operations.Operation) error // Custom volume snapshots. CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, newExpiryDate time.Time, op *operations.Operation) error From 6638b40ed42c0968e92fa249faef4065db24a6f5 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 12:40:01 +0200 Subject: [PATCH 127/543] lxd/storage: Disallow some actions on ContentTypeISO volumes This disables snapshots, backups (exports), config changes, and refresh on custom ISO volumes. Signed-off-by: Thomas Hipp --- lxd/storage/backend_lxd.go | 52 ++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index e259258c79d6..6d4839379a3a 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -1142,10 +1142,13 @@ func (b *lxdBackend) RefreshCustomVolume(projectName string, srcProjectName stri } // Get the source volume's content type. 
- contentType := drivers.ContentTypeFS + contentType, err := VolumeDBContentTypeToContentType(contentDBType) + if err != nil { + return err + } - if contentDBType == db.StoragePoolVolumeContentTypeBlock { - contentType = drivers.ContentTypeBlock + if contentType != drivers.ContentTypeFS && contentType != drivers.ContentTypeBlock { + return fmt.Errorf("Volume of content type %q cannot be refreshed", contentType) } storagePoolSupported := false @@ -4133,6 +4136,11 @@ func (b *lxdBackend) UpdateCustomVolume(projectName string, volName string, newD // Apply config changes if there are any. changedConfig, userOnly := b.detectChangedConfig(curVol.Config, newConfig) if len(changedConfig) != 0 { + // Forbid changing the config for ISO custom volumes as they are read-only. + if contentType == drivers.ContentTypeISO { + return fmt.Errorf("Custom ISO volume config cannot be changed") + } + // Check that the volume's block.filesystem property isn't being changed. if changedConfig["block.filesystem"] != "" { return fmt.Errorf("Custom volume 'block.filesystem' property cannot be changed") @@ -4506,28 +4514,32 @@ func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, new return err } - revert := revert.New() - defer revert.Fail() - - // Validate config and create database entry for new storage volume. - // Copy volume config from parent. 
- err = VolumeDBCreate(b, projectName, fullSnapshotName, parentVol.Description, drivers.VolumeTypeCustom, true, parentVol.Config, newExpiryDate, drivers.ContentType(parentVol.ContentType), false, true) + volDBContentType, err := VolumeContentTypeNameToContentType(parentVol.ContentType) if err != nil { return err } - revert.Add(func() { _ = VolumeDBDelete(b, projectName, fullSnapshotName, drivers.VolumeTypeCustom) }) - - volDBContentType, err := VolumeContentTypeNameToContentType(parentVol.ContentType) + contentType, err := VolumeDBContentTypeToContentType(volDBContentType) if err != nil { return err } - contentType, err := VolumeDBContentTypeToContentType(volDBContentType) + if contentType != drivers.ContentTypeFS && contentType != drivers.ContentTypeBlock { + return fmt.Errorf("Volume of content type %q does not support snapshots", contentType) + } + + revert := revert.New() + defer revert.Fail() + + // Validate config and create database entry for new storage volume. + // Copy volume config from parent. + err = VolumeDBCreate(b, projectName, fullSnapshotName, parentVol.Description, drivers.VolumeTypeCustom, true, parentVol.Config, newExpiryDate, drivers.ContentType(parentVol.ContentType), false, true) if err != nil { return err } + revert.Add(func() { _ = VolumeDBDelete(b, projectName, fullSnapshotName, drivers.VolumeTypeCustom) }) + // Get the volume name on storage. 
volStorageName := project.StorageVolume(projectName, fullSnapshotName) vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, parentVol.Config) @@ -5515,6 +5527,20 @@ func (b *lxdBackend) BackupCustomVolume(projectName string, volName string, tarW return err } + contentDBType, err := VolumeContentTypeNameToContentType(volume.ContentType) + if err != nil { + return err + } + + contentType, err := VolumeDBContentTypeToContentType(contentDBType) + if err != nil { + return err + } + + if contentType != drivers.ContentTypeFS && contentType != drivers.ContentTypeBlock { + return fmt.Errorf("Volume of content type %q cannot be backed up", contentType) + } + var snapNames []string if snapshots { // Get snapshots in age order, oldest first, and pass names to storage driver. From 6c8e40750195c2b37e944c317f8d0c3ab9d97034 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 21 Jun 2023 16:18:27 +0200 Subject: [PATCH 128/543] lxd/storage: Support copying ISO storage volumes Signed-off-by: Thomas Hipp --- lxd/storage/backend_lxd.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 6d4839379a3a..439af86c3050 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -3498,10 +3498,9 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectNa } // Get the source volume's content type. - contentType := drivers.ContentTypeFS - - if contentDBType == db.StoragePoolVolumeContentTypeBlock { - contentType = drivers.ContentTypeBlock + contentType, err := VolumeDBContentTypeToContentType(contentDBType) + if err != nil { + return err } storagePoolSupported := false @@ -3599,7 +3598,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectNa // "no space left on device". 
var volSize int64 - if contentType == drivers.ContentTypeBlock { + if drivers.IsContentBlock(contentType) { err = srcVol.MountTask(func(mountPath string, op *operations.Operation) error { srcPoolBackend, ok := srcPool.(*lxdBackend) if !ok { From 5192d11f0158d04ba3f7ee71f5392c1961c07ac7 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 22 Jun 2023 18:21:47 +0200 Subject: [PATCH 129/543] lxd/storage: Handle migration of custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go index b98be199da5e..274202fc0d44 100644 --- a/lxd/storage/utils.go +++ b/lxd/storage/utils.go @@ -862,7 +862,7 @@ func VolumeUsedByDaemon(s *state.State, poolName string, volumeName string) (boo // FallbackMigrationType returns the fallback migration transport to use based on volume content type. func FallbackMigrationType(contentType drivers.ContentType) migration.MigrationFSType { - if contentType == drivers.ContentTypeBlock { + if drivers.IsContentBlock(contentType) { return migration.MigrationFSType_BLOCK_AND_RSYNC } From f54ba7a5e5c28bfc509f601d7a47b9b5fafe96e5 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 22 Jun 2023 18:14:27 +0200 Subject: [PATCH 130/543] lxd/storage/drivers/common: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_common.go b/lxd/storage/drivers/driver_common.go index 570d57ec2304..6e5abd70325a 100644 --- a/lxd/storage/drivers/driver_common.go +++ b/lxd/storage/drivers/driver_common.go @@ -200,7 +200,7 @@ func (d *common) MigrationTypes(contentType ContentType, refresh bool, copySnaps rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"} } - if contentType == ContentTypeBlock { + if IsContentBlock(contentType) { transportType = migration.MigrationFSType_BLOCK_AND_RSYNC } else { 
transportType = migration.MigrationFSType_RSYNC From 0556458c45ebe87c08af02c7531edbaec04266c4 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 22 Jun 2023 18:13:45 +0200 Subject: [PATCH 131/543] lxd/storage/drivers/generic: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/generic_vfs.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lxd/storage/drivers/generic_vfs.go b/lxd/storage/drivers/generic_vfs.go index 850c627ec355..d71e36ae019c 100644 --- a/lxd/storage/drivers/generic_vfs.go +++ b/lxd/storage/drivers/generic_vfs.go @@ -257,14 +257,14 @@ func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadW // Send volume to target (ensure local volume is mounted if needed). return vol.MountTask(func(mountPath string, op *operations.Operation) error { - if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom { + if !IsContentBlock(vol.contentType) || vol.volType != VolumeTypeCustom { err := sendFSVol(vol, conn, mountPath) if err != nil { return err } } - if vol.IsVMBlock() || (vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom) { + if vol.IsVMBlock() || (IsContentBlock(vol.contentType) && vol.volType == VolumeTypeCustom) { err := sendBlockVol(vol, conn) if err != nil { return err @@ -279,7 +279,7 @@ func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadW // initVolume is run against the main volume (not the snapshots) and is often used for quota initialization. func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) (revert.Hook, error), vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error { // Check migration transport type matches volume type. 
- if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC { return ErrNotSupported } @@ -354,7 +354,7 @@ func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) ( path := shared.AddSlash(mountPath) pathBlock := "" - if vol.IsVMBlock() || (vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom) { + if vol.IsVMBlock() || (IsContentBlock(vol.contentType) && vol.volType == VolumeTypeCustom) { pathBlock, err = d.GetVolumeDiskPath(vol) if err != nil { return fmt.Errorf("Error getting VM block volume disk path: %w", err) @@ -402,7 +402,7 @@ func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) ( } } - if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom { + if !IsContentBlock(vol.contentType) || vol.volType != VolumeTypeCustom { // Receive main volume. err = recvFSVol(vol.name, conn, path) if err != nil { @@ -411,7 +411,7 @@ func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) ( } // Receive the final main volume sync if needed. - if volTargetArgs.Live && (vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom) { + if volTargetArgs.Live && (!IsContentBlock(vol.contentType) || vol.volType != VolumeTypeCustom) { d.Logger().Debug("Starting main volume final sync", logger.Ctx{"volName": vol.name, "path": path}) err = recvFSVol(vol.name, conn, path) if err != nil { @@ -427,7 +427,7 @@ func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) ( } // Receive the block volume next (if needed). 
- if vol.IsVMBlock() || (vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom) { + if vol.IsVMBlock() || (IsContentBlock(vol.contentType) && vol.volType == VolumeTypeCustom) { err = recvBlockVol(vol.name, conn, pathBlock) if err != nil { return err From 06556501d25ec84f57a5eecca706ae27bb432231 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 12:39:06 +0200 Subject: [PATCH 132/543] lxd/storage/drivers/dir: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_dir_volumes.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_dir_volumes.go b/lxd/storage/drivers/driver_dir_volumes.go index 637ce3e6feac..c1b09132f2ff 100644 --- a/lxd/storage/drivers/driver_dir_volumes.go +++ b/lxd/storage/drivers/driver_dir_volumes.go @@ -41,9 +41,9 @@ func (d *dir) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper revert.Add(func() { _ = os.RemoveAll(volPath) }) - // Create sparse loopback file if volume is block. + // Get path to disk volume if volume is block or iso. rootBlockPath := "" - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // We expect the filler to copy the VM image into this path. rootBlockPath, err = d.GetVolumeDiskPath(vol) if err != nil { @@ -68,8 +68,9 @@ func (d *dir) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper } // If we are creating a block volume, resize it to the requested size or the default. - // We expect the filler function to have converted the qcow2 image to raw into the rootBlockPath. - if vol.contentType == ContentTypeBlock { + // For block volumes, we expect the filler function to have converted the qcow2 image to raw into the rootBlockPath. + // For ISOs the content will just be copied. + if IsContentBlock(vol.contentType) { // Convert to bytes. 
sizeBytes, err := units.ParseByteSizeString(vol.ConfigSize()) if err != nil { From 25874f34112c8d1106b155718ffbd9ef54369f9c Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 20 Jun 2023 12:16:53 +0200 Subject: [PATCH 133/543] lxd/storage/drivers/btrfs: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_btrfs.go | 4 ++-- lxd/storage/drivers/driver_btrfs_volumes.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go index 5c8ed0c66b8e..ea2146410b53 100644 --- a/lxd/storage/drivers/driver_btrfs.go +++ b/lxd/storage/drivers/driver_btrfs.go @@ -491,7 +491,7 @@ func (d *btrfs) MigrationTypes(contentType ContentType, refresh bool, copySnapsh if d.state.OS.RunningInUserNS { var transportType migration.MigrationFSType - if contentType == ContentTypeBlock { + if IsContentBlock(contentType) { transportType = migration.MigrationFSType_BLOCK_AND_RSYNC } else { transportType = migration.MigrationFSType_RSYNC @@ -505,7 +505,7 @@ func (d *btrfs) MigrationTypes(contentType ContentType, refresh bool, copySnapsh } } - if contentType == ContentTypeBlock { + if IsContentBlock(contentType) { return []migration.Type{ { FSType: migration.MigrationFSType_BTRFS, diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go index f1d680b18987..efab267660af 100644 --- a/lxd/storage/drivers/driver_btrfs_volumes.go +++ b/lxd/storage/drivers/driver_btrfs_volumes.go @@ -52,7 +52,7 @@ func (d *btrfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Op // Create sparse loopback file if volume is block. rootBlockPath := "" - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // We expect the filler to copy the VM image into this path. 
rootBlockPath, err = d.GetVolumeDiskPath(vol) if err != nil { @@ -81,7 +81,7 @@ func (d *btrfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Op // If we are creating a block volume, resize it to the requested size or the default. // We expect the filler function to have converted the qcow2 image to raw into the rootBlockPath. - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // Convert to bytes. sizeBytes, err := units.ParseByteSizeString(vol.ConfigSize()) if err != nil { From b2d4bfe8a359bb4db4f95c8ad36d7cc208e2ef67 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 20 Jun 2023 12:18:29 +0200 Subject: [PATCH 134/543] lxd/storage/drivers/ceph: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_ceph.go | 2 +- lxd/storage/drivers/driver_ceph_volumes.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lxd/storage/drivers/driver_ceph.go b/lxd/storage/drivers/driver_ceph.go index ab4c12fec2df..8fac13800882 100644 --- a/lxd/storage/drivers/driver_ceph.go +++ b/lxd/storage/drivers/driver_ceph.go @@ -392,7 +392,7 @@ func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapsho if refresh { var transportType migration.MigrationFSType - if contentType == ContentTypeBlock { + if IsContentBlock(contentType) { transportType = migration.MigrationFSType_BLOCK_AND_RSYNC } else { transportType = migration.MigrationFSType_RSYNC diff --git a/lxd/storage/drivers/driver_ceph_volumes.go b/lxd/storage/drivers/driver_ceph_volumes.go index 2b0e6ba79704..a49fabe68613 100644 --- a/lxd/storage/drivers/driver_ceph_volumes.go +++ b/lxd/storage/drivers/driver_ceph_volumes.go @@ -180,7 +180,7 @@ func (d *ceph) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Ope var err error var devPath string - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // Get the device path. 
devPath, err = d.GetVolumeDiskPath(vol) if err != nil { @@ -1052,7 +1052,7 @@ func (d *ceph) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, o // GetVolumeDiskPath returns the location of a root disk block device. func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) { - if vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && vol.contentType == ContentTypeBlock) { + if vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) { _, devPath, err := d.getRBDMappedDevPath(vol, false) return devPath, err } From 9cefa4a77f9461388fc37a88bd1cec26497cc7d0 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 20 Jun 2023 12:19:37 +0200 Subject: [PATCH 135/543] lxd/storage/drivers/lvm: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_lvm_volumes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_lvm_volumes.go b/lxd/storage/drivers/driver_lvm_volumes.go index 325a44cb1b9f..5c15cf0a53f4 100644 --- a/lxd/storage/drivers/driver_lvm_volumes.go +++ b/lxd/storage/drivers/driver_lvm_volumes.go @@ -61,7 +61,7 @@ func (d *lvm) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper var err error var devPath string - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // Get the device path. devPath, err = d.GetVolumeDiskPath(vol) if err != nil { @@ -504,7 +504,7 @@ func (d *lvm) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, op // GetVolumeDiskPath returns the location of a disk volume. 
func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) { - if vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && vol.contentType == ContentTypeBlock) { + if vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) { volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name) return volDevPath, nil } From a0534883fa825e06cf280d39b47b030b0edb0308 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 22 Jun 2023 09:03:26 +0200 Subject: [PATCH 136/543] lxd/storage/drivers/zfs: Handle ContentTypeISO Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs.go | 2 +- lxd/storage/drivers/driver_zfs_volumes.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs.go b/lxd/storage/drivers/driver_zfs.go index ca14061a5575..95a0b4658867 100644 --- a/lxd/storage/drivers/driver_zfs.go +++ b/lxd/storage/drivers/driver_zfs.go @@ -632,7 +632,7 @@ func (d *zfs) MigrationTypes(contentType ContentType, refresh bool, copySnapshot features = append(features, "compress") } - if contentType == ContentTypeBlock { + if IsContentBlock(contentType) { return []migration.Type{ { FSType: migration.MigrationFSType_ZFS, diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 0f5b8fdeefe4..da1beab6e950 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -248,7 +248,7 @@ func (d *zfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper var err error var devPath string - if vol.contentType == ContentTypeBlock { + if IsContentBlock(vol.contentType) { // Get the device path. devPath, err = d.GetVolumeDiskPath(vol) if err != nil { @@ -1570,7 +1570,7 @@ func (d *zfs) SetVolumeQuota(vol Volume, size string, allowUnsafeResize bool, op inUse := vol.MountInUse() // Handle volume datasets. 
- if vol.contentType == ContentTypeBlock || d.isBlockBacked(vol) && vol.contentType == ContentTypeFS { + if d.isBlockBacked(vol) && vol.contentType == ContentTypeFS || IsContentBlock(vol.contentType) { // Do nothing if size isn't specified. if sizeBytes <= 0 { return nil @@ -1899,7 +1899,7 @@ func (d *zfs) ListVolumes() ([]Volume, error) { // activateVolume activates a ZFS volume if not already active. Returns true if activated, false if not. func (d *zfs) activateVolume(vol Volume) (bool, error) { - if vol.contentType != ContentTypeBlock && !vol.IsBlockBacked() { + if !IsContentBlock(vol.contentType) && !vol.IsBlockBacked() { return false, nil // Nothing to do for non-block or non-block backed volumes. } @@ -2044,7 +2044,7 @@ func (d *zfs) MountVolume(vol Volume, op *operations.Operation) error { revert.Add(func() { _, _ = d.deactivateVolume(vol) }) } - if vol.contentType != ContentTypeBlock && d.isBlockBacked(vol) && !filesystem.IsMountPoint(mountPath) { + if !IsContentBlock(vol.contentType) && d.isBlockBacked(vol) && !filesystem.IsMountPoint(mountPath) { volPath, err := d.GetVolumeDiskPath(vol) if err != nil { return err From 754ad4002e30b82c2b08f9155f200f5511670037 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 17:33:20 +0200 Subject: [PATCH 137/543] qemu: Set media=cdrom for ISO 9660 images Signed-off-by: Thomas Hipp --- lxd/instance/drivers/driver_qemu.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 62f1c096cb70..d954481b41f6 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -3439,17 +3439,17 @@ func (d *qemu) addDriveConfig(bootIndexes map[string]int, driveConf deviceConfig // Only warn about using writeback cache if the drive image is writable. 
d.logger.Warn("Using writeback cache I/O", logger.Ctx{"device": driveConf.DevName, "devPath": srcDevPath, "fsType": fsType}) } - - // Special case ISO images as cdroms. - if strings.HasSuffix(srcDevPath, ".iso") { - media = "cdrom" - } } else if !shared.StringInSlice(device.DiskDirectIO, driveConf.Opts) { // If drive config indicates we need to use unsafe I/O then use it. d.logger.Warn("Using unsafe cache I/O", logger.Ctx{"device": driveConf.DevName, "devPath": srcDevPath}) aioMode = "threads" cacheMode = "unsafe" // Use host cache, but ignore all sync requests from guest. } + + // Special case ISO images as cdroms. + if driveConf.FSType == "iso9660" { + media = "cdrom" + } } // Check if the user has overridden the cache mode. From 950e2f1f26754ad8af768af877e694f15b45770a Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 19 Jun 2023 17:34:36 +0200 Subject: [PATCH 138/543] lxd/device/disk: Handle ISO custom volumes Signed-off-by: Thomas Hipp --- lxd/device/disk.go | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/lxd/device/disk.go b/lxd/device/disk.go index e12577367229..8648de7365fe 100644 --- a/lxd/device/disk.go +++ b/lxd/device/disk.go @@ -315,6 +315,14 @@ func (d *disk) validateConfig(instConf instance.ConfigReader) error { if d.config["path"] != "" { return fmt.Errorf("Custom block volumes cannot have a path defined") } + } else if contentType == db.StoragePoolVolumeContentTypeISO { + if instConf.Type() == instancetype.Container { + return fmt.Errorf("Custom ISO volumes cannot be used on containers") + } + + if d.config["path"] != "" { + return fmt.Errorf("Custom ISO volumes cannot have a path defined") + } } else if d.config["path"] == "" { return fmt.Errorf("Custom filesystem volumes require a path to be defined") } @@ -764,9 +772,13 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) { return nil, err } + if contentType == db.StoragePoolVolumeContentTypeISO { + mount.FSType = "iso9660" + } + // 
If the pool is ceph backed and a block device, don't mount it, instead pass config to QEMU instance // to use the built in RBD support. - if d.pool.Driver().Info().Name == "ceph" && contentType == db.StoragePoolVolumeContentTypeBlock { + if d.pool.Driver().Info().Name == "ceph" && (contentType == db.StoragePoolVolumeContentTypeBlock || contentType == db.StoragePoolVolumeContentTypeISO) { config := d.pool.ToAPI().Config poolName := config["ceph.osd.pool_name"] @@ -780,13 +792,17 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) { clusterName = storageDrivers.CephDefaultUser } - runConf.Mounts = []deviceConfig.MountEntryItem{ - { - DevPath: DiskGetRBDFormat(clusterName, userName, poolName, d.config["source"]), - DevName: d.name, - }, + mount := deviceConfig.MountEntryItem{ + DevPath: DiskGetRBDFormat(clusterName, userName, poolName, d.config["source"]), + DevName: d.name, } + if contentType == db.StoragePoolVolumeContentTypeISO { + mount.FSType = "iso9660" + } + + runConf.Mounts = []deviceConfig.MountEntryItem{mount} + return &runConf, nil } @@ -1251,7 +1267,7 @@ func (d *disk) mountPoolVolume() (func(), string, error) { } } - if dbVolume.ContentType == db.StoragePoolVolumeContentTypeNameBlock { + if dbVolume.ContentType == db.StoragePoolVolumeContentTypeNameBlock || dbVolume.ContentType == db.StoragePoolVolumeContentTypeNameISO { srcPath, err = d.pool.GetCustomVolumeDisk(storageProjectName, volumeName) if err != nil { return nil, "", fmt.Errorf("Failed to get disk path: %w", err) From 33615615b54d809a8563e365b92a1a834abfa8b1 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 20 Jun 2023 12:39:01 +0200 Subject: [PATCH 139/543] client: Add CreateStoragePoolVolumeFromISO Signed-off-by: Thomas Hipp --- client/interfaces.go | 3 ++ client/lxd_storage_volumes.go | 58 +++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/client/interfaces.go b/client/interfaces.go index 813e24a755b1..a47d4ff435f8 100644 --- a/client/interfaces.go 
+++ b/client/interfaces.go @@ -388,6 +388,9 @@ type InstanceServer interface { GetStoragePoolVolumeBackupFile(pool string, volName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) CreateStoragePoolVolumeFromBackup(pool string, args StoragePoolVolumeBackupArgs) (op Operation, err error) + // Storage volume ISO import function ("custom_volume_iso" API extension) + CreateStoragePoolVolumeFromISO(pool string, args StoragePoolVolumeBackupArgs) (op Operation, err error) + // Cluster functions ("cluster" API extensions) GetCluster() (cluster *api.Cluster, ETag string, err error) UpdateCluster(cluster api.ClusterPut, ETag string) (op Operation, err error) diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go index 8c3f654604a7..6a3089d86c09 100644 --- a/client/lxd_storage_volumes.go +++ b/client/lxd_storage_volumes.go @@ -943,6 +943,64 @@ func (r *ProtocolLXD) GetStoragePoolVolumeBackupFile(pool string, volName string return &resp, nil } +// CreateStoragePoolVolumeFromISO creates a custom volume from an ISO file. +func (r *ProtocolLXD) CreateStoragePoolVolumeFromISO(pool string, args StoragePoolVolumeBackupArgs) (Operation, error) { + err := r.CheckExtension("custom_volume_iso") + if err != nil { + return nil, err + } + + path := fmt.Sprintf("/storage-pools/%s/volumes/custom", url.PathEscape(pool)) + + // Prepare the HTTP request. + reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", reqURL, args.BackupFile) + if err != nil { + return nil, err + } + + if args.Name == "" { + return nil, fmt.Errorf("Missing volume name") + } + + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("X-LXD-name", args.Name) + req.Header.Set("X-LXD-type", "iso") + + // Send the request. 
+ resp, err := r.DoHTTP(req) + if err != nil { + return nil, err + } + + defer func() { _ = resp.Body.Close() }() + + // Handle errors. + response, _, err := lxdParseResponse(resp) + if err != nil { + return nil, err + } + + // Get to the operation. + respOperation, err := response.MetadataAsOperation() + if err != nil { + return nil, err + } + + // Setup an Operation wrapper. + op := operation{ + Operation: *respOperation, + r: r, + chActive: make(chan bool), + } + + return &op, nil +} + // CreateStoragePoolVolumeFromBackup creates a custom volume from a backup file. func (r *ProtocolLXD) CreateStoragePoolVolumeFromBackup(pool string, args StoragePoolVolumeBackupArgs) (Operation, error) { if !r.HasExtension("custom_volume_backup") { From 36800776e301cbf86a74778e15cb3c9f77d0ecde Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 30 Jun 2023 15:45:27 +0200 Subject: [PATCH 140/543] lxd/storage: Rephrase restriction of snapshots Signed-off-by: Thomas Hipp --- lxd/storage/backend_lxd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 439af86c3050..51e324089f2e 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -4486,7 +4486,7 @@ func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, new defer l.Debug("CreateCustomVolumeSnapshot finished") if shared.IsSnapshot(volName) { - return fmt.Errorf("Volume cannot be snapshot") + return fmt.Errorf("Volume does not support snapshots") } if shared.IsSnapshot(newSnapshotName) { From 845921aa644ef8e76c45561ec745137234e53535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Mon, 3 Jul 2023 23:29:21 -0400 Subject: [PATCH 141/543] lxd/device/veth: Fix MTU handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/device/device_utils_network.go | 41 ++++++++++++++++++++++-------- 1 file changed, 31 
insertions(+), 10 deletions(-) diff --git a/lxd/device/device_utils_network.go b/lxd/device/device_utils_network.go index 1b319047d448..8f14727fe5c8 100644 --- a/lxd/device/device_utils_network.go +++ b/lxd/device/device_utils_network.go @@ -223,24 +223,45 @@ func networkCreateVethPair(hostName string, m deviceConfig.Device) (string, uint }, } - // Set the MTU on both ends. If not specified and has parent, will inherit MTU from parent. + // Set the MTU on both ends. + // The host side should always line up with the bridge to avoid accidentally lowering the bridge MTU. + // The instance side should use the configured MTU (if any), if not, it should match the host side. + var instanceMTU uint32 + var parentMTU uint32 + + if m["parent"] != "" { + mtu, err := network.GetDevMTU(m["parent"]) + if err != nil { + return "", 0, fmt.Errorf("Failed to get the parent MTU: %w", err) + } + + parentMTU = uint32(mtu) + } + if m["mtu"] != "" { mtu, err := strconv.ParseUint(m["mtu"], 10, 32) if err != nil { return "", 0, fmt.Errorf("Invalid MTU specified: %w", err) } - veth.MTU = uint32(mtu) - } else if m["parent"] != "" { - mtu, err := network.GetDevMTU(m["parent"]) - if err != nil { - return "", 0, fmt.Errorf("Failed to get the parent MTU: %w", err) - } + instanceMTU = uint32(mtu) + } - veth.MTU = mtu + if instanceMTU == 0 && parentMTU > 0 { + instanceMTU = parentMTU } - veth.Peer.MTU = veth.MTU + if parentMTU == 0 && instanceMTU > 0 { + parentMTU = instanceMTU + } + + if instanceMTU > 0 { + veth.Peer.MTU = instanceMTU + } + + if parentMTU > 0 { + veth.MTU = parentMTU + } // Set the MAC address on peer. if m["hwaddr"] != "" { @@ -276,7 +297,7 @@ func networkCreateVethPair(hostName string, m deviceConfig.Device) (string, uint return "", 0, fmt.Errorf("Failed to create the veth interfaces %q and %q: %w", hostName, veth.Peer.Name, err) } - return veth.Peer.Name, veth.MTU, nil + return veth.Peer.Name, veth.Peer.MTU, nil } // networkCreateTap creates and configures a TAP device. 
From 86f3136d472032f1a977e3e70151f90942e2042f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Mon, 3 Jul 2023 23:40:59 -0400 Subject: [PATCH 142/543] lxd/device/tap: Fix MTU handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- lxd/device/device_utils_network.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lxd/device/device_utils_network.go b/lxd/device/device_utils_network.go index 8f14727fe5c8..c99664260e8b 100644 --- a/lxd/device/device_utils_network.go +++ b/lxd/device/device_utils_network.go @@ -325,7 +325,9 @@ func networkCreateTap(hostName string, m deviceConfig.Device) (uint32, error) { revert.Add(func() { _ = network.InterfaceRemove(hostName) }) - // Set the MTU on peer. If not specified and has parent, will inherit MTU from parent. + // Set the MTU on both ends. + // The host side should always line up with the bridge to avoid accidentally lowering the bridge MTU. + // The instance side should use the configured MTU (if any), if not, it should match the host side. 
var mtu uint32 if m["mtu"] != "" { nicMTU, err := strconv.ParseUint(m["mtu"], 10, 32) @@ -334,20 +336,22 @@ func networkCreateTap(hostName string, m deviceConfig.Device) (uint32, error) { } mtu = uint32(nicMTU) - } else if m["parent"] != "" { + } + + if m["parent"] != "" { parentMTU, err := network.GetDevMTU(m["parent"]) if err != nil { return 0, fmt.Errorf("Failed to get the parent MTU: %w", err) } - mtu = parentMTU - } - - if mtu > 0 { - err = NetworkSetDevMTU(hostName, mtu) + err = NetworkSetDevMTU(hostName, parentMTU) if err != nil { return 0, fmt.Errorf("Failed to set the MTU %d: %w", mtu, err) } + + if mtu == 0 { + mtu = parentMTU + } } revert.Success() From 78409a413e2b07b02b743983d3aa6fe8d8d0fef8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= Date: Mon, 3 Jul 2023 23:58:22 -0400 Subject: [PATCH 143/543] tests: Update for bridged host MTU behavior MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber --- test/suites/container_devices_nic_bridged.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/suites/container_devices_nic_bridged.sh b/test/suites/container_devices_nic_bridged.sh index 3592096a8024..4274fe8d8de0 100644 --- a/test/suites/container_devices_nic_bridged.sh +++ b/test/suites/container_devices_nic_bridged.sh @@ -88,8 +88,8 @@ test_container_devices_nic_bridged() { false fi - # Check profile custom MTU is applied on host side of veth. - if ! grep "1400" /sys/class/net/"${vethHostName}"/mtu ; then + # Check profile custom MTU doesn't affect the host. + if ! grep "1500" /sys/class/net/"${vethHostName}"/mtu ; then echo "host veth mtu invalid" false fi @@ -157,8 +157,8 @@ test_container_devices_nic_bridged() { false fi - # Check custom MTU is applied host-side on hot-plug. - if ! grep "1401" /sys/class/net/"${vethHostName}"/mtu ; then + # Check custom MTU doesn't affect the host. + if ! 
grep "1500" /sys/class/net/"${vethHostName}"/mtu ; then echo "host veth mtu invalid" false fi @@ -208,8 +208,8 @@ test_container_devices_nic_bridged() { false fi - # Checl profile custom MTU is applied host-side on hot-removal. - if ! grep "1400" /sys/class/net/"${vethHostName}"/mtu ; then + # Check custom MTU doesn't affect the host. + if ! grep "1500" /sys/class/net/"${vethHostName}"/mtu ; then echo "host veth mtu invalid" false fi From 0eff6c07d0901c4b911621b2dc299549b165146e Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Mon, 3 Jul 2023 11:15:54 +0500 Subject: [PATCH 144/543] added a heading in the client/operations file that was missing Signed-off-by: Bilal Khan --- client/operations.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/client/operations.go b/client/operations.go index 5bee839614bf..8ac33785fd6c 100644 --- a/client/operations.go +++ b/client/operations.go @@ -144,6 +144,9 @@ func (op *operation) WaitContext(ctx context.Context) error { return nil } +// setupListener initiates an event listener for an operation and manages updates to the operation's state. +// It adds handlers to process events, monitors the listener for completion or errors, +// and triggers a manual refresh of the operation's state to prevent race conditions. 
func (op *operation) setupListener() error { // Make sure we're not racing with ourselves op.handlerLock.Lock() From 7e545f9792c94b47779f3137e2af3dc51aa1e00f Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Mon, 3 Jul 2023 10:57:54 +0500 Subject: [PATCH 145/543] some functions in the client/lxd.go file were missing the documentation headings Signed-off-by: Bilal Khan --- client/lxd.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/client/lxd.go b/client/lxd.go index f138452c65b3..4833fcbd0ee4 100644 --- a/client/lxd.go +++ b/client/lxd.go @@ -102,6 +102,8 @@ func (r *ProtocolLXD) GetConnectionInfo() (*ConnectionInfo, error) { return &info, nil } +// isSameServer compares the calling ProtocolLXD object with the provided server object to check if they are the same server. +// It verifies the equality based on their connection information (Protocol, Certificate, Project, and Target). func (r *ProtocolLXD) isSameServer(server Server) bool { // Short path checking if the two structs are identical. if r == server { @@ -232,6 +234,8 @@ func lxdParseResponse(resp *http.Response) (*api.Response, string, error) { return &response, etag, nil } +// rawQuery is a method that sends an HTTP request to the LXD server with the provided method, URL, data, and ETag. +// It processes the request based on the data's type and handles the HTTP response, returning parsed results or an error if it occurs. func (r *ProtocolLXD) rawQuery(method string, url string, data any, ETag string) (*api.Response, string, error) { var req *http.Request var err error @@ -345,6 +349,8 @@ func (r *ProtocolLXD) query(method string, path string, data any, ETag string) ( return r.rawQuery(method, url, data, ETag) } +// queryStruct sends a query to the LXD server, then converts the response metadata into the specified target struct. +// The function logs the retrieved data, returns the etag of the response, and handles any errors during this process. 
func (r *ProtocolLXD) queryStruct(method string, path string, data any, ETag string, target any) (string, error) { resp, etag, err := r.query(method, path, data, ETag) if err != nil { @@ -363,6 +369,8 @@ func (r *ProtocolLXD) queryStruct(method string, path string, data any, ETag str return etag, nil } +// queryOperation sends a query to the LXD server and then converts the response metadata into an Operation object. +// It sets up an early event listener, performs the query, processes the response, and manages the lifecycle of the event listener. func (r *ProtocolLXD) queryOperation(method string, path string, data any, ETag string) (Operation, string, error) { // Attempt to setup an early event listener listener, err := r.GetEvents() @@ -405,6 +413,8 @@ func (r *ProtocolLXD) queryOperation(method string, path string, data any, ETag return &op, etag, nil } +// rawWebsocket creates a websocket connection to the provided URL using the underlying HTTP transport of the ProtocolLXD receiver. +// It sets up the request headers, manages the connection handshake, sets TCP timeouts, and handles any errors that may occur during these operations. func (r *ProtocolLXD) rawWebsocket(url string) (*websocket.Conn, error) { // Grab the http transport handler httpTransport, err := r.getUnderlyingHTTPTransport() @@ -450,6 +460,8 @@ func (r *ProtocolLXD) rawWebsocket(url string) (*websocket.Conn, error) { return conn, nil } +// websocket generates a websocket URL based on the provided path and the base URL of the ProtocolLXD receiver. +// It then leverages the rawWebsocket method to establish and return a websocket connection to the generated URL. 
func (r *ProtocolLXD) websocket(path string) (*websocket.Conn, error) { // Generate the URL var url string From 63c5181f895369f2dd7943e70636ae7a1fc26d82 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Tue, 4 Jul 2023 15:54:00 +0500 Subject: [PATCH 146/543] Added a heading in the client/lxd_candid.go file that was missing Signed-off-by: Bilal Khan --- client/lxd_candid.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/lxd_candid.go b/client/lxd_candid.go index 0d75b8bc30f4..9ea9507e2dfa 100644 --- a/client/lxd_candid.go +++ b/client/lxd_candid.go @@ -4,6 +4,8 @@ import ( "github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery" ) +// setupBakeryClient initializes the bakeryClient with a new client, sets its http field, +// and adds any existing interactors. func (r *ProtocolLXD) setupBakeryClient() { r.bakeryClient = httpbakery.NewClient() r.bakeryClient.Client = r.http From aa9cb2a7ccdf216f989dd2318527f976efe08dcd Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Tue, 4 Jul 2023 16:08:21 +0500 Subject: [PATCH 147/543] Added a heading in the client/lxd_containers.go file that was missing Signed-off-by: Bilal Khan --- client/lxd_containers.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/lxd_containers.go b/client/lxd_containers.go index c9e9d83c1937..1e94447ffcef 100644 --- a/client/lxd_containers.go +++ b/client/lxd_containers.go @@ -1201,6 +1201,8 @@ func (r *ProtocolLXD) RenameContainerSnapshot(containerName string, name string, return op, nil } +// tryMigrateContainerSnapshot attempts to migrate a container snapshot from the source instance server to one of the target URLs. +// It runs the migration asynchronously and returns a RemoteOperation to track the migration status and any errors. 
func (r *ProtocolLXD) tryMigrateContainerSnapshot(source InstanceServer, containerName string, name string, req api.ContainerSnapshotPost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The target server isn't listening on the network") From 99c50408acb96bebeb748afaea0b38ab55e52b84 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Tue, 4 Jul 2023 10:30:22 -0400 Subject: [PATCH 148/543] doc/howto/instances_create: give an example of VM launch with a bigger root disk Signed-off-by: Simon Deziel --- doc/howto/instances_create.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/howto/instances_create.md b/doc/howto/instances_create.md index d08fc3ee1d92..240305dda8c0 100644 --- a/doc/howto/instances_create.md +++ b/doc/howto/instances_create.md @@ -60,6 +60,10 @@ To launch a virtual machine with a Ubuntu 22.04 image from the `images` server u lxc launch images:ubuntu/22.04 ubuntu-vm --vm +Or with a bigger disk: + + lxc launch images:ubuntu/22.04 ubuntu-vm-big --vm --device root,size=30GiB + ### Launch a container with specific configuration options To launch a container and limit its resources to one vCPU and 192 MiB of RAM, enter the following command: From 3d59712e52459706ecb9697a4e6f9c6480d7e625 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Tue, 4 Jul 2023 10:31:32 -0400 Subject: [PATCH 149/543] doc/howto/instances_create: s/a Ubuntu/an Ubuntu/ Signed-off-by: Simon Deziel --- doc/howto/instances_create.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/howto/instances_create.md b/doc/howto/instances_create.md index 240305dda8c0..611c74cd98ae 100644 --- a/doc/howto/instances_create.md +++ b/doc/howto/instances_create.md @@ -50,13 +50,13 @@ The following examples use `lxc launch`, but you can use `lxc init` in the same ### Launch a container -To launch a container with a Ubuntu 22.04 image from the `images` server using the instance name `ubuntu-container`, enter the following command: +To 
launch a container with an Ubuntu 22.04 image from the `images` server using the instance name `ubuntu-container`, enter the following command: lxc launch images:ubuntu/22.04 ubuntu-container ### Launch a virtual machine -To launch a virtual machine with a Ubuntu 22.04 image from the `images` server using the instance name `ubuntu-vm`, enter the following command: +To launch a virtual machine with an Ubuntu 22.04 image from the `images` server using the instance name `ubuntu-vm`, enter the following command: lxc launch images:ubuntu/22.04 ubuntu-vm --vm From 6a466efd5c3dde91395cd5cc60154daab6431203 Mon Sep 17 00:00:00 2001 From: Din Music Date: Tue, 4 Jul 2023 18:01:43 +0200 Subject: [PATCH 150/543] doc: Fix typo in edit a profile section Signed-off-by: Din Music --- doc/profiles.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/profiles.md b/doc/profiles.md index 0e18be4bcac7..7323c21b6236 100644 --- a/doc/profiles.md +++ b/doc/profiles.md @@ -50,11 +50,11 @@ Specify the profile name and the key and value of the instance option: To add and configure an instance device for your profile, use the `lxc profile device add` command. Specify the profile name, a device name, the device type and maybe device options (depending on the {ref}`device type `): - lxc profile device add = = ... + lxc profile device add = = ... To configure instance device options for a device that you have added to the profile earlier, use the `lxc profile device set` command: - lxc profile device set = = ... + lxc profile device set = = ... ### Edit the full profile From 6612b0e0084232ced3a88932c4e37cdcd4a759b8 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Tue, 4 Jul 2023 21:49:40 +0100 Subject: [PATCH 151/543] shared: Rename imports to new go module Signed-off-by: Thomas Parrott And make tests go fmt safe. 
Signed-off-by: Thomas Parrott --- shared/cmd/table_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/shared/cmd/table_test.go b/shared/cmd/table_test.go index 6be20556e15b..0e1a4165ac52 100644 --- a/shared/cmd/table_test.go +++ b/shared/cmd/table_test.go @@ -133,11 +133,11 @@ foo,1,/1.0/instances/foo sortColumns: "i", columnMap: testDataTypeColumnMap, }, - expect: ` SOME STRING SOME INTEGER - foo 1 - bar 2 - fizz 3 - buzz 4 + expect: ` SOME STRING SOME INTEGER ` + ` + foo 1 ` + ` + bar 2 ` + ` + fizz 3 ` + ` + buzz 4 ` + ` `, expectErr: nil, }, From 8b0c1d5cdae6406c19192eaa1a0cb7a761a6a475 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 4 Jul 2023 17:55:16 +0200 Subject: [PATCH 152/543] lxd/instance/drivers/qemu: Skip container config keys MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index d954481b41f6..87880e10d2b3 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -4949,6 +4949,12 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { } isLiveUpdatable := func(key string) bool { + // Skip container config keys for VMs + _, ok := shared.InstanceConfigKeysContainer[key] + if ok { + return true + } + if key == "limits.cpu" { return d.architectureSupportsCPUHotplug() } From 775e8f01a873f5f77821f5becfd5eb2b7e704f28 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 17:44:00 +0500 Subject: [PATCH 153/543] In the lxd_instance.go file, I added the heading to the rebuildInstance() function Signed-off-by: Bilal Khan --- client/lxd_instances.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index cd52b8b8a9c5..afef88c68957 100644 --- 
a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -211,6 +211,7 @@ func (r *ProtocolLXD) UpdateInstances(state api.InstancesPut, ETag string) (Oper return op, nil } +// rebuildInstance initiates a rebuild of a given instance on the LXD Protocol server and returns the corresponding operation or an error. func (r *ProtocolLXD) rebuildInstance(instanceName string, instance api.InstanceRebuildPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { From 277e40aa96c31da3263d55719cc3527abb7e0165 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 17:46:06 +0500 Subject: [PATCH 154/543] In the lxd_instance.go file, I added the heading to the tryRebuildInstance() function Signed-off-by: Bilal Khan --- client/lxd_instances.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index afef88c68957..68b1e30c7dd7 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -227,6 +227,8 @@ func (r *ProtocolLXD) rebuildInstance(instanceName string, instance api.Instance return op, nil } +// tryRebuildInstance attempts to rebuild a specific instance on multiple target servers identified by their URLs. +// It runs the rebuild process asynchronously and returns a RemoteOperation to monitor the progress and any errors. 
func (r *ProtocolLXD) tryRebuildInstance(instanceName string, req api.InstanceRebuildPost, urls []string, op Operation) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") From 73c5d021a496f38749997520755d48984f6a937a Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 17:46:58 +0500 Subject: [PATCH 155/543] In the lxd_instance.go file, I added the heading to the tryCreateInstance() function Signed-off-by: Bilal Khan --- client/lxd_instances.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 68b1e30c7dd7..3c13207dd44a 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -612,6 +612,8 @@ func (r *ProtocolLXD) CreateInstance(instance api.InstancesPost) (Operation, err return op, nil } +// tryCreateInstance attempts to create a new instance on multiple target servers specified by their URLs. +// It runs the instance creation asynchronously and returns a RemoteOperation to monitor the progress and any errors. 
func (r *ProtocolLXD) tryCreateInstance(req api.InstancesPost, urls []string, op Operation) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") From a4d16bf401d0a0325e41efca1678fc019e6ecc80 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 17:47:26 +0500 Subject: [PATCH 156/543] In the lxd_instance.go file, I added the heading to the tryMigrateInstance() function Signed-off-by: Bilal Khan --- client/lxd_instances.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 3c13207dd44a..26b95272021e 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -974,6 +974,8 @@ func (r *ProtocolLXD) RenameInstance(name string, instance api.InstancePost) (Op return op, nil } +// tryMigrateInstance attempts to migrate a specific instance from a source server to one of the target URLs. +// The function runs the migration operation asynchronously and returns a RemoteOperation to track the progress and handle any errors. 
func (r *ProtocolLXD) tryMigrateInstance(source InstanceServer, name string, req api.InstancePost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The target server isn't listening on the network") From 00ad621cd4b32bb38b64270f17a6d7f63409cc8a Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 19:35:30 +0500 Subject: [PATCH 157/543] added headings in the lxd_storage_volumes.go file functions that were missing Signed-off-by: Bilal Khan --- client/lxd_storage_volumes.go | 2 ++ client/util.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go index 6a3089d86c09..eff444e05e43 100644 --- a/client/lxd_storage_volumes.go +++ b/client/lxd_storage_volumes.go @@ -431,6 +431,8 @@ func (r *ProtocolLXD) tryMigrateStoragePoolVolume(source InstanceServer, pool st return &rop, nil } +// tryCreateStoragePoolVolume attempts to create a storage volume in the specified storage pool. +// It will try to do this on every server in the provided list of urls, and waits for the creation to be complete. func (r *ProtocolLXD) tryCreateStoragePoolVolume(pool string, req api.StorageVolumesPost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") diff --git a/client/util.go b/client/util.go index 711697635211..924c5b94d390 100644 --- a/client/util.go +++ b/client/util.go @@ -13,6 +13,10 @@ import ( "github.com/canonical/lxd/shared" ) +// tlsHTTPClient creates an HTTP client with a specified Transport Layer Security (TLS) configuration. +// It takes in parameters for client certificates, keys, Certificate Authority, server certificates, +// a boolean for skipping verification, a proxy function, and a transport wrapper function. +// It returns the HTTP client with the provided configurations and handles any errors that might occur during the setup process. 
func tlsHTTPClient(client *http.Client, tlsClientCert string, tlsClientKey string, tlsCA string, tlsServerCert string, insecureSkipVerify bool, proxy func(req *http.Request) (*url.URL, error), transportWrapper func(t *http.Transport) HTTPTransporter) (*http.Client, error) { // Get the TLS configuration tlsConfig, err := shared.GetTLSConfigMem(tlsClientCert, tlsClientKey, tlsCA, tlsServerCert, insecureSkipVerify) From 658e19dba1da2da52083e1add697c88c10fc05d5 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Wed, 5 Jul 2023 19:35:56 +0500 Subject: [PATCH 158/543] added headings in the util.go file function that was missing Signed-off-by: Bilal Khan --- client/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/client/util.go b/client/util.go index 924c5b94d390..6fbd2ba8f7a3 100644 --- a/client/util.go +++ b/client/util.go @@ -109,6 +109,10 @@ func tlsHTTPClient(client *http.Client, tlsClientCert string, tlsClientKey strin return client, nil } +// unixHTTPClient creates an HTTP client that communicates over a Unix socket. +// It takes in the connection arguments and the Unix socket path as parameters. +// The function sets up a Unix socket dialer, configures the HTTP transport, and returns the HTTP client with the specified configurations. +// Any errors encountered during the setup process are also handled by the function. 
func unixHTTPClient(args *ConnectionArgs, path string) (*http.Client, error) { // Setup a Unix socket dialer unixDial := func(_ context.Context, network, addr string) (net.Conn, error) { From c22ab23cfa01400b4783556c885d55c6036cd75b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 5 Jul 2023 18:11:03 +0200 Subject: [PATCH 159/543] tests/cluster/groups: Add test for cluster group edits via PUT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- test/suites/clustering.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index 64f797894ac9..47d49860e53f 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -3292,6 +3292,16 @@ test_clustering_groups() { lxc cluster group create cluster:foobar [ "$(lxc query cluster:/1.0/cluster/groups/foobar | jq '.members | length')" -eq 0 ] + # Copy both description and members from default group + lxc cluster group show cluster:default | lxc cluster group edit cluster:foobar + [ "$(lxc query cluster:/1.0/cluster/groups/foobar | jq '.description == "Default cluster group"')" = "true" ] + [ "$(lxc query cluster:/1.0/cluster/groups/foobar | jq '.members | length')" -eq 3 ] + + # Delete all members from new group + lxc cluster group remove cluster:node1 foobar + lxc cluster group remove cluster:node2 foobar + lxc cluster group remove cluster:node3 foobar + # Add second node to new group. Node2 will now belong to both groups. 
lxc cluster group assign cluster:node2 default,foobar [ "$(lxc query cluster:/1.0/cluster/members/node2 | jq 'any(.groups[] == "default"; .)')" = "true" ] From d33a12b5cc7a532717c78249df33a211c60d14d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 3 Jul 2023 10:41:47 +0200 Subject: [PATCH 160/543] lxd/device/disk: Disable hot plugging of directory disks from VMs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/device/disk.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lxd/device/disk.go b/lxd/device/disk.go index 8648de7365fe..f6e1c61a7512 100644 --- a/lxd/device/disk.go +++ b/lxd/device/disk.go @@ -101,9 +101,19 @@ func (d *disk) CanMigrate() bool { return false } +// sourceIsDir returns true if the disks source config setting is a directory. +func (d *disk) sourceIsDir() bool { + return shared.IsDir(d.config["source"]) +} + +// sourceIsCephFs returns true if the disks source config setting is a CephFS share. +func (d *disk) sourceIsCephFs() bool { + return strings.HasPrefix(d.config["source"], "cephfs:") +} + // CanHotPlug returns whether the device can be managed whilst the instance is running. func (d *disk) CanHotPlug() bool { - return true + return !(d.sourceIsDir() || d.sourceIsCephFs()) || d.inst.Type() == instancetype.Container } // validateConfig checks the supplied config for correctness. From 8df5bf407e65d4de96cedf88bd33cbf8e03e94dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 3 Jul 2023 11:21:01 +0200 Subject: [PATCH 161/543] lxd/instance/drivers/qemu: Remove obsolete directory disk check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This check can now be removed in favor of the more general check if a directory disk allows hot plugging. For the deviceDetachBlockDevice counterpart function there never was such a check. 
Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 87880e10d2b3..4dccd424f76f 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -2016,10 +2016,6 @@ func (d *qemu) deviceStart(dev device.Device, instanceRunning bool) (*deviceConf } func (d *qemu) deviceAttachBlockDevice(deviceName string, configCopy map[string]string, mount deviceConfig.MountEntryItem) error { - if mount.FSType == "9p" { - return fmt.Errorf("Cannot attach directory while instance is running") - } - // Check if the agent is running. monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler()) if err != nil { From 7d9804fc690cca622feb0a06dcb3d3e048489a7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 4 Jul 2023 15:51:26 +0200 Subject: [PATCH 162/543] lxd/device/disk: Use functions to check for Ceph RBD or FS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/device/disk.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/lxd/device/disk.go b/lxd/device/disk.go index f6e1c61a7512..acdd28b5fcfe 100644 --- a/lxd/device/disk.go +++ b/lxd/device/disk.go @@ -111,6 +111,11 @@ func (d *disk) sourceIsCephFs() bool { return strings.HasPrefix(d.config["source"], "cephfs:") } +// sourceIsCeph returns true if the disks source config setting is a Ceph RBD. +func (d *disk) sourceIsCeph() bool { + return strings.HasPrefix(d.config["source"], "ceph:") +} + // CanHotPlug returns whether the device can be managed whilst the instance is running. 
func (d *disk) CanHotPlug() bool { return !(d.sourceIsDir() || d.sourceIsCephFs()) || d.inst.Type() == instancetype.Container @@ -138,7 +143,7 @@ func (d *disk) sourceIsLocalPath(source string) bool { return false } - if shared.StringHasPrefix(d.config["source"], "ceph:", "cephfs:") { + if d.sourceIsCeph() || d.sourceIsCephFs() { return false } @@ -227,7 +232,7 @@ func (d *disk) validateConfig(instConf instance.ConfigReader) error { } // Check ceph options are only used when ceph or cephfs type source is specified. - if !shared.StringHasPrefix(d.config["source"], "ceph:", "cephfs:") && (d.config["ceph.cluster_name"] != "" || d.config["ceph.user_name"] != "") { + if !(d.sourceIsCeph() || d.sourceIsCephFs()) && (d.config["ceph.cluster_name"] != "" || d.config["ceph.user_name"] != "") { return fmt.Errorf("Invalid options ceph.cluster_name/ceph.user_name for source %q", d.config["source"]) } @@ -735,7 +740,7 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) { revert.Success() return &runConf, nil } else if d.config["source"] != "" { - if strings.HasPrefix(d.config["source"], "ceph:") { + if d.sourceIsCeph() { // Get the pool and volume names. fields := strings.SplitN(d.config["source"], ":", 2) fields = strings.SplitN(fields[1], "/", 2) @@ -833,7 +838,7 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) { // If the source being added is a directory or cephfs share, then we will use the lxd-agent // directory sharing feature to mount the directory inside the VM, and as such we need to // indicate to the VM the target path to mount to. - if shared.IsDir(mount.DevPath) || strings.HasPrefix(d.config["source"], "cephfs:") { + if shared.IsDir(mount.DevPath) || d.sourceIsCephFs() { // Mount the source in the instance devices directory. 
// This will ensure that if the exported directory configured as readonly that this // takes effect event if using virtio-fs (which doesn't support read only mode) by @@ -1307,7 +1312,7 @@ func (d *disk) createDevice(srcPath string) (func(), string, bool, error) { var isFile bool if d.config["pool"] == "" { - if strings.HasPrefix(d.config["source"], "cephfs:") { + if d.sourceIsCephFs() { // Get fs name and path from d.config. fields := strings.SplitN(d.config["source"], ":", 2) fields = strings.SplitN(fields[1], "/", 2) @@ -1327,7 +1332,7 @@ func (d *disk) createDevice(srcPath string) (func(), string, bool, error) { fsName = "ceph" srcPath = mntSrcPath isFile = false - } else if strings.HasPrefix(d.config["source"], "ceph:") { + } else if d.sourceIsCeph() { // Get the pool and volume names. fields := strings.SplitN(d.config["source"], ":", 2) fields = strings.SplitN(fields[1], "/", 2) @@ -1713,7 +1718,7 @@ func (d *disk) postStop() error { } } - if strings.HasPrefix(d.config["source"], "ceph:") { + if d.sourceIsCeph() { v := d.volatileGet() err := diskCephRbdUnmap(v["ceph_rbd"]) if err != nil { From bb94fdc141cf3cdf395d5100ffd298817e043bbc Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Thu, 6 Jul 2023 17:59:17 +0500 Subject: [PATCH 163/543] added multiple headings in the files of lxc directory Signed-off-by: Bilal Khan --- lxc/config/remote.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lxc/config/remote.go b/lxc/config/remote.go index 4305533b6f29..c98340e7ecc1 100644 --- a/lxc/config/remote.go +++ b/lxc/config/remote.go @@ -187,6 +187,10 @@ func (c *Config) GetImageServer(name string) (lxd.ImageServer, error) { return d, nil } +// getConnectionArgs retrieves the connection arguments for the specified remote. +// It constructs the necessary connection arguments based on the remote's configuration, including authentication type, +// authentication interactors, cookie jar, OIDC tokens, TLS certificates, and client key. 
+// The function returns the connection arguments or an error if any configuration is missing or encounters a problem. func (c *Config) getConnectionArgs(name string) (*lxd.ConnectionArgs, error) { remote := c.Remotes[name] args := lxd.ConnectionArgs{ From ce8509c6e5fa801812db615559cb424c0d1f6f6d Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Thu, 6 Jul 2023 18:02:37 +0500 Subject: [PATCH 164/543] added multiple headings in the action.go file of lxc directory Signed-off-by: Bilal Khan --- lxc/action.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lxc/action.go b/lxc/action.go index 6f27496cb104..98c215f6756c 100644 --- a/lxc/action.go +++ b/lxc/action.go @@ -20,6 +20,8 @@ type cmdStart struct { action *cmdAction } +// The function Command() returns a cobra.Command object representing the "start" command. +// It is used to start one or more instances specified by the user. func (c *cmdStart) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction @@ -39,6 +41,8 @@ type cmdPause struct { action *cmdAction } +// The function Command() returns a cobra.Command object representing the "pause" command. +// It is used to pause (or freeze) one or more instances specified by the user. This command is hidden and has an alias "freeze". func (c *cmdPause) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction @@ -60,6 +64,8 @@ type cmdRestart struct { action *cmdAction } +// The function Command() returns a cobra.Command object representing the "restart" command. +// It is used to restart one or more instances specified by the user. This command restarts the instances, which is the opposite of the "pause" command. func (c *cmdRestart) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction @@ -81,6 +87,8 @@ type cmdStop struct { action *cmdAction } +// The function Command() returns a cobra.Command object representing the "stop" command. 
+// It is used to stop one or more instances specified by the user. This command stops the instances, effectively shutting them down. func (c *cmdStop) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction @@ -105,6 +113,8 @@ type cmdAction struct { flagTimeout int } +// Command is a method of the cmdAction structure which constructs and configures a cobra Command object. +// It creates a command with a specific action, defines flags based on that action, and assigns appropriate help text. func (c *cmdAction) Command(action string) *cobra.Command { cmd := &cobra.Command{} cmd.RunE = c.Run @@ -130,6 +140,8 @@ func (c *cmdAction) Command(action string) *cobra.Command { return cmd } +// doActionAll is a method of the cmdAction structure. It performs a specified action on all instances of a remote resource. +// It ensures that flags and parameters are appropriately set, and handles any errors that may occur during the process. func (c *cmdAction) doActionAll(action string, resource remoteResource) error { if resource.name != "" { // both --all and instance name given. @@ -189,6 +201,8 @@ func (c *cmdAction) doActionAll(action string, resource remoteResource) error { return nil } +// doAction is a method of the cmdAction structure. It carries out a specified action on an instance, +// using a given config and instance name. It manages state changes, flag checks, error handling and console attachment. func (c *cmdAction) doAction(action string, conf *config.Config, nameArg string) error { state := false @@ -287,6 +301,8 @@ func (c *cmdAction) doAction(action string, conf *config.Config, nameArg string) return nil } +// Run is a method of the cmdAction structure that implements the execution logic for the given Cobra command. +// It handles actions on instances (single or all) and manages error handling, console flag restrictions, and batch operations. 
func (c *cmdAction) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf From be7f2e740a36f5c2429d1b1f8ff2762f87471e45 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Thu, 6 Jul 2023 18:03:00 +0500 Subject: [PATCH 165/543] added multiple headings in the alias.go file of lxc directory Signed-off-by: Bilal Khan --- lxc/alias.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lxc/alias.go b/lxc/alias.go index d0f245167484..4368ca978e49 100644 --- a/lxc/alias.go +++ b/lxc/alias.go @@ -14,6 +14,8 @@ type cmdAlias struct { global *cmdGlobal } +// Command is a method of the cmdAlias structure that returns a new cobra Command for managing command aliases. +// This includes commands for adding, listing, renaming, and removing aliases, along with their usage and descriptions. func (c *cmdAlias) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("alias") @@ -49,6 +51,8 @@ type cmdAliasAdd struct { alias *cmdAlias } +// Command is a method of the cmdAliasAdd structure that returns a new cobra Command for adding new command aliases. +// It specifies the command usage, description, and examples, and links it to the RunE method for execution logic. func (c *cmdAliasAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G(" ")) @@ -64,6 +68,8 @@ func (c *cmdAliasAdd) Command() *cobra.Command { return cmd } +// Run is a method of the cmdAliasAdd structure. It implements the logic to add a new alias command. +// The function checks for valid arguments, verifies if the alias already exists, and if not, adds the new alias to the configuration. func (c *cmdAliasAdd) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf @@ -94,6 +100,8 @@ type cmdAliasList struct { flagFormat string } +// Command is a method of the cmdAliasList structure that returns a new cobra Command for listing command aliases. 
+// It specifies the command usage, description, aliases, and output formatting options, and links it to the RunE method for execution logic. func (c *cmdAliasList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list") @@ -108,6 +116,8 @@ func (c *cmdAliasList) Command() *cobra.Command { return cmd } +// Run is a method of the cmdAliasList structure. It implements the logic to list existing command aliases. +// The function checks for valid arguments, collects all the aliases, sorts them, and renders them in the specified format. func (c *cmdAliasList) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf @@ -139,6 +149,8 @@ type cmdAliasRename struct { alias *cmdAlias } +// Command is a method of the cmdAliasRename structure. It returns a new cobra.Command object. +// This command allows a user to rename existing aliases in the CLI application. func (c *cmdAliasRename) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("rename", i18n.G(" ")) @@ -155,6 +167,8 @@ func (c *cmdAliasRename) Command() *cobra.Command { return cmd } +// Run is a method of the cmdAliasRename structure. It takes a cobra command and a slice of strings as arguments. +// This method checks the validity of arguments, ensures the existence of the old alias, verifies the non-existence of the new alias, and then proceeds to rename the alias in the configuration. func (c *cmdAliasRename) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf @@ -190,6 +204,8 @@ type cmdAliasRemove struct { alias *cmdAlias } +// Command is a method of the cmdAliasRemove structure. It configures and returns a cobra.Command object. +// This command enables the removal of a given alias from the command line interface. 
func (c *cmdAliasRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("")) @@ -206,6 +222,8 @@ func (c *cmdAliasRemove) Command() *cobra.Command { return cmd } +// Run is a method of the cmdAliasRemove structure that executes the actual operation of the alias removal command. +// It takes as input the name of the alias to be removed and updates the global configuration file to reflect this change. func (c *cmdAliasRemove) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf From 3778b2c47ef69e797c766b4eabc6c3352a125f7c Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 6 Jul 2023 15:46:17 +0200 Subject: [PATCH 166/543] lxd/instance/drivers: Update system unit documentation This updates the `Documentation` key in the lxd-agent service file. Signed-off-by: Thomas Hipp --- lxd/instance/drivers/driver_qemu.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 4dccd424f76f..2e19ed418f9f 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -2385,7 +2385,7 @@ func (d *qemu) generateConfigShare() error { lxdAgentServiceUnit := `[Unit] Description=LXD - agent -Documentation=https://linuxcontainers.org/lxd +Documentation=https://documentation.ubuntu.com/lxd/en/latest/ ConditionPathExists=/dev/virtio-ports/org.linuxcontainers.lxd Before=cloud-init.target cloud-init.service cloud-init-local.service DefaultDependencies=no From 54305411643e4d3805a4cc18b6c8643a8a3a4080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 5 Jul 2023 09:02:48 +0200 Subject: [PATCH 167/543] lxd/instance/drivers/qemu: Restructure live updateable keys prefix check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 38 +++++++++-------------------- 1 file changed, 12 
insertions(+), 26 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 2e19ed418f9f..ab8abeee38f7 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -4944,6 +4944,16 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { "security.secureboot", } + liveUpdateKeyPrefixes := []string{ + "boot.", + "cloud-init.", + "environment.", + "image.", + "snapshots.", + "user.", + "volatile.", + } + isLiveUpdatable := func(key string) bool { // Skip container config keys for VMs _, ok := shared.InstanceConfigKeysContainer[key] @@ -4955,35 +4965,11 @@ func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error { return d.architectureSupportsCPUHotplug() } - if strings.HasPrefix(key, "boot.") { - return true - } - - if strings.HasPrefix(key, "cloud-init.") { - return true - } - - if strings.HasPrefix(key, "environment.") { - return true - } - - if strings.HasPrefix(key, "image.") { - return true - } - - if strings.HasPrefix(key, "snapshots.") { - return true - } - - if strings.HasPrefix(key, "user.") { - return true - } - - if strings.HasPrefix(key, "volatile.") { + if shared.StringInSlice(key, liveUpdateKeys) { return true } - if shared.StringInSlice(key, liveUpdateKeys) { + if shared.StringHasPrefix(key, liveUpdateKeyPrefixes...) { return true } From 89f4d785135be888b6369e0fe02b7c591abed258 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 7 Jul 2023 11:38:41 +0100 Subject: [PATCH 168/543] test: Sleep for 2s when making clustering event hub changes To let the change propagate before testing for it. 
Signed-off-by: Thomas Parrott --- test/suites/clustering.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index 47d49860e53f..32b8e839f078 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -3504,7 +3504,7 @@ test_clustering_events() { grep -Fc "cluster-member-updated" "${TEST_DIR}/node${i}.log" | grep -Fx 2 done - sleep 1 # Wait for notification heartbeat to distribute new roles. + sleep 2 # Wait for notification heartbeat to distribute new roles. LXD_DIR="${LXD_ONE_DIR}" lxc info | grep -F "server_event_mode: hub-server" LXD_DIR="${LXD_TWO_DIR}" lxc info | grep -F "server_event_mode: hub-server" LXD_DIR="${LXD_THREE_DIR}" lxc info | grep -F "server_event_mode: hub-client" @@ -3561,7 +3561,7 @@ test_clustering_events() { LXD_DIR="${LXD_FOUR_DIR}" lxc cluster role add node4 event-hub LXD_DIR="${LXD_FIVE_DIR}" lxc cluster role add node5 event-hub - sleep 1 # Wait for notification heartbeat to distribute new roles. + sleep 2 # Wait for notification heartbeat to distribute new roles. LXD_DIR="${LXD_ONE_DIR}" lxc info | grep -F "server_event_mode: hub-client" LXD_DIR="${LXD_TWO_DIR}" lxc info | grep -F "server_event_mode: hub-client" LXD_DIR="${LXD_THREE_DIR}" lxc info | grep -F "server_event_mode: hub-client" From 72a3222674e04b938c99eab0c0fb8aac88f45ea2 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Fri, 7 Jul 2023 18:00:31 +0500 Subject: [PATCH 169/543] Added headings in the cluster_group.go file of the LXC directory Signed-off-by: Bilal Khan --- lxc/cluster_group.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lxc/cluster_group.go b/lxc/cluster_group.go index 5f7ac11f04d4..5c6e93d6311d 100644 --- a/lxc/cluster_group.go +++ b/lxc/cluster_group.go @@ -22,6 +22,7 @@ type cmdClusterGroup struct { cluster *cmdCluster } +// Cluster management including assignment, creation, deletion, editing, listing, removal, renaming, and showing details. 
func (c *cmdClusterGroup) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("group") @@ -70,6 +71,7 @@ type cmdClusterGroupAssign struct { cluster *cmdCluster } +// Assigns groups to cluster members, setting usage, description, examples, and the RunE method. func (c *cmdClusterGroupAssign) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("assign", i18n.G("[:] ")) @@ -89,6 +91,7 @@ lxc cluster group assign foo default return cmd } +// Assigns groups to a cluster member, performing checks, parsing arguments, and updating the member's group configuration. func (c *cmdClusterGroupAssign) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) @@ -142,6 +145,7 @@ type cmdClusterGroupCreate struct { cluster *cmdCluster } +// Creation of a new cluster group, defining its usage, short and long descriptions, and the RunE method. func (c *cmdClusterGroupCreate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("create", i18n.G("[:]")) @@ -154,6 +158,7 @@ func (c *cmdClusterGroupCreate) Command() *cobra.Command { return cmd } +// It creates a new cluster group after performing checks, parsing arguments, and making the server call for creation. func (c *cmdClusterGroupCreate) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) @@ -196,6 +201,7 @@ type cmdClusterGroupDelete struct { cluster *cmdCluster } +// It deletes a cluster group, setting up usage, descriptions, aliases, and the RunE method. func (c *cmdClusterGroupDelete) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("delete", i18n.G("[:]")) @@ -209,6 +215,7 @@ func (c *cmdClusterGroupDelete) Command() *cobra.Command { return cmd } +// It performs the deletion of a cluster group after argument checks, parsing, and making the server call for deletion.
func (c *cmdClusterGroupDelete) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) @@ -247,6 +254,7 @@ type cmdClusterGroupEdit struct { cluster *cmdCluster } +// This Command generates the cobra command that enables the editing of a cluster group's attributes. func (c *cmdClusterGroupEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:]")) @@ -259,6 +267,7 @@ func (c *cmdClusterGroupEdit) Command() *cobra.Command { return cmd } +// The modification of a cluster group's configuration, either through an editor or via the terminal. func (c *cmdClusterGroupEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) @@ -345,6 +354,7 @@ func (c *cmdClusterGroupEdit) Run(cmd *cobra.Command, args []string) error { return nil } +// Returns a string explaining the expected YAML structure for a cluster group configuration. func (c *cmdClusterGroupEdit) helpTemplate() string { return i18n.G( `### This is a YAML representation of the cluster group. @@ -359,6 +369,7 @@ type cmdClusterGroupList struct { flagFormat string } +// Command returns a cobra command to list all the cluster groups in a specified format. func (c *cmdClusterGroupList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list", i18n.G("[:]")) @@ -373,6 +384,7 @@ func (c *cmdClusterGroupList) Command() *cobra.Command { return cmd } +// Run executes the command to list all the cluster groups, their descriptions, and number of members. func (c *cmdClusterGroupList) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 1) @@ -432,6 +444,7 @@ type cmdClusterGroupRemove struct { cluster *cmdCluster } +// Removal of a specified member from a specific cluster group. 
func (c *cmdClusterGroupRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("[:] ")) @@ -444,6 +457,7 @@ func (c *cmdClusterGroupRemove) Command() *cobra.Command { return cmd } +// The removal process of a cluster member from a specific cluster group, with verbose output unless the 'quiet' flag is set. func (c *cmdClusterGroupRemove) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) @@ -502,6 +516,7 @@ type cmdClusterGroupRename struct { cluster *cmdCluster } +// Renaming a cluster group, defining usage, aliases, and linking the associated runtime function. func (c *cmdClusterGroupRename) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("rename", i18n.G("[:] ")) @@ -515,6 +530,7 @@ func (c *cmdClusterGroupRename) Command() *cobra.Command { return cmd } +// Renaming operation of a cluster group after checking arguments and parsing the remote server, and provides appropriate output. func (c *cmdClusterGroupRename) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) @@ -549,6 +565,7 @@ type cmdClusterGroupShow struct { cluster *cmdCluster } +// Setting up the 'show' command to display the configurations of a specified cluster group in a remote server. func (c *cmdClusterGroupShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:]")) @@ -561,6 +578,7 @@ func (c *cmdClusterGroupShow) Command() *cobra.Command { return cmd } +// This retrieves and prints the configuration details of a specified cluster group from a remote server in YAML format. func (c *cmdClusterGroupShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 1, 1) From 7fd78745f5500d45347a2f9633daa99b2058eb6e Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Fri, 7 Jul 2023 18:02:42 +0500 Subject: [PATCH 170/543] Added headings in the cluster_role.go file of the LXC directory Signed-off-by: Bilal Khan --- lxc/cluster_role.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lxc/cluster_role.go b/lxc/cluster_role.go index 93ad5e5d0ed3..78c2750f5c4b 100644 --- a/lxc/cluster_role.go +++ b/lxc/cluster_role.go @@ -15,6 +15,7 @@ type cmdClusterRole struct { cluster *cmdCluster } +// It uses the cmdGlobal, cmdCluster, and cmdClusterRole structs for context and operation. func (c *cmdClusterRole) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("role") @@ -41,6 +42,7 @@ type cmdClusterRoleAdd struct { clusterRole *cmdClusterRole } +// Setting up the usage, short description, and long description of the command, as well as its RunE method. func (c *cmdClusterRoleAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G("[:] ")) @@ -53,6 +55,7 @@ func (c *cmdClusterRoleAdd) Command() *cobra.Command { return cmd } +// It checks and parses input arguments, verifies role assignment, and updates the member's roles. func (c *cmdClusterRoleAdd) Run(cmd *cobra.Command, args []string) error { exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { @@ -95,6 +98,7 @@ type cmdClusterRoleRemove struct { clusterRole *cmdClusterRole } +// Removing the roles from a cluster member, setting up usage, descriptions, and the RunE method. func (c *cmdClusterRoleRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("[:] ")) @@ -107,6 +111,7 @@ func (c *cmdClusterRoleRemove) Command() *cobra.Command { return cmd } +// Run executes the removal of specified roles from a cluster member, checking inputs, validating role assignment, and updating the member's roles. 
func (c *cmdClusterRoleRemove) Run(cmd *cobra.Command, args []string) error { exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { From 5a57e2fc8768981462022210927efffdb0325b5d Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 7 Jul 2023 15:30:44 +0200 Subject: [PATCH 171/543] lxd/db: Return instance type from GetInstancesByMemberAddress Signed-off-by: Thomas Hipp --- lxd/db/instances.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lxd/db/instances.go b/lxd/db/instances.go index e9bc0306c7e1..4c686af86fd1 100644 --- a/lxd/db/instances.go +++ b/lxd/db/instances.go @@ -147,6 +147,7 @@ type Instance struct { Name string Project string Location string + Type instancetype.Type } // GetInstancesByMemberAddress returns the instances associated to each cluster member address. @@ -157,7 +158,7 @@ func (c *ClusterTx) GetInstancesByMemberAddress(ctx context.Context, offlineThre var q strings.Builder q.WriteString(`SELECT - instances.id, instances.name, + instances.id, instances.name, instances.type, nodes.id, nodes.name, nodes.address, nodes.heartbeat, projects.name FROM instances @@ -193,7 +194,7 @@ func (c *ClusterTx) GetInstancesByMemberAddress(ctx context.Context, offlineThre var memberAddress string var memberID int64 var memberHeartbeat time.Time - err := rows.Scan(&inst.ID, &inst.Name, &memberID, &inst.Location, &memberAddress, &memberHeartbeat, &inst.Project) + err := rows.Scan(&inst.ID, &inst.Name, &inst.Type, &memberID, &inst.Location, &memberAddress, &memberHeartbeat, &inst.Project) if err != nil { return nil, err } From b4199b2e769fc025b4db512530078ae6b71db177 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 7 Jul 2023 15:32:06 +0200 Subject: [PATCH 172/543] lxd: Add instance type to instances of offline cluster members This sets the instance type correctly for instances of offline cluster members. 
Fixes #11947 Signed-off-by: Thomas Hipp --- lxd/instances_get.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxd/instances_get.go b/lxd/instances_get.go index b1c6ab513926..4a660ffea910 100644 --- a/lxd/instances_get.go +++ b/lxd/instances_get.go @@ -318,6 +318,7 @@ func doInstancesGet(s *state.State, r *http.Request) (any, error) { StatusCode: api.Error, Location: inst.Location, Project: inst.Project, + Type: inst.Type.String(), }, } From 954a7af9fd611eca383af8f5d6215e0a3ba69a4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Fri, 7 Jul 2023 12:29:03 +0200 Subject: [PATCH 173/543] lxd/storage/drivers/zfs: Don't delete the target volume when migrating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/storage/drivers/driver_zfs_volumes.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index da1beab6e950..2790df61e400 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -885,11 +885,6 @@ func (d *zfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, vol // refresh. 
if len(snapshots) == 0 { volTargetArgs.Refresh = false - - err = d.DeleteVolume(vol, op) - if err != nil { - return fmt.Errorf("Failed deleting volume: %w", err) - } } var respSnapshots []ZFSDataset From d985dd3b75ed6cef68ceab20171869ecf59c1ec3 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 09:13:01 -0400 Subject: [PATCH 174/543] doc/contributing: the default branch was renamed to 'main' Signed-off-by: Simon Deziel --- doc/contributing.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/contributing.md b/doc/contributing.md index ba20b402111a..07f472fd5dc7 100644 --- a/doc/contributing.md +++ b/doc/contributing.md @@ -24,8 +24,7 @@ After building your dependencies, you can now add your GitHub fork as a remote: Then switch to it: - - git checkout myfork/master + git checkout myfork/main ### Building LXD From f024621b22fb46257fc40a404ca961ada4e3811e Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 09:17:39 -0400 Subject: [PATCH 175/543] lxd/resources: update link to systemd github repo Signed-off-by: Simon Deziel --- lxd/resources/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/resources/utils.go b/lxd/resources/utils.go index 8f658572bf66..2c0f16f04027 100644 --- a/lxd/resources/utils.go +++ b/lxd/resources/utils.go @@ -109,7 +109,7 @@ func hasBitField(n []uint32, bit uint) bool { } func udevDecode(s string) (string, error) { - // Inverse of https://github.com/systemd/systemd/blob/master/src/basic/device-nodes.c#L22 + // Inverse of https://github.com/systemd/systemd/blob/main/src/shared/device-nodes.c#L19 ret := "" for i := 0; i < len(s); i++ { // udev converts non-devnode supported chars to four byte encode hex strings. 
From 0c0f1b80860ac3bea9aabd00cde5188c44fec415 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 09:49:37 -0400 Subject: [PATCH 176/543] lxd/migrate: update link to doc (new path and branch name) Signed-off-by: Simon Deziel --- lxd/migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/migrate.go b/lxd/migrate.go index fa11f07f1a0f..69c43b3a088a 100644 --- a/lxd/migrate.go +++ b/lxd/migrate.go @@ -1,6 +1,6 @@ // Package migration provides the primitives for migration in LXD. // -// See https://github.com/canonical/lxd/blob/master/specs/migration.md for a complete +// See https://github.com/canonical/lxd/blob/main/doc/migration.md for a complete // description. package main From e49e06c350398761768d39b3ad5ee14b6b977052 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 09:52:48 -0400 Subject: [PATCH 177/543] lxd/main_cluster: update link to online doc Signed-off-by: Simon Deziel --- lxd/main_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/main_cluster.go b/lxd/main_cluster.go index 41080f24ca61..8776b335da32 100644 --- a/lxd/main_cluster.go +++ b/lxd/main_cluster.go @@ -425,7 +425,7 @@ database, so you can possibly inspect it for further recovery. You'll be able to permanently delete from the database all information about former cluster members by running "lxc cluster remove --force". -See https://linuxcontainers.org/lxd/docs/master/clustering#recover-from-quorum-loss for more +See https://documentation.ubuntu.com/lxd/en/latest/howto/cluster_recover/#recover-from-quorum-loss for more info. Do you want to proceed? 
(yes/no): `) From 76179fa53496ff9f3007aeb3a87725fda3517a7e Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 10:06:39 -0400 Subject: [PATCH 178/543] grafana: update link to online doc and dashboard ID Signed-off-by: Simon Deziel --- grafana/LXD.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/grafana/LXD.json b/grafana/LXD.json index 038d6c58dc0b..855a7b5faa3e 100644 --- a/grafana/LXD.json +++ b/grafana/LXD.json @@ -64,7 +64,7 @@ "description": "Overview of LXD instances", "editable": false, "fiscalYearStartMonth": 0, - "gnetId": 15726, + "gnetId": 19131, "graphTooltip": 0, "id": null, "links": [ @@ -74,7 +74,7 @@ "targetBlank": true, "title": "Documentation", "type": "link", - "url": "https://linuxcontainers.org/lxd/docs/latest/" + "url": "https://documentation.ubuntu.com/lxd/en/latest/" } ], "liveNow": false, From c8676612c87396552ca28d8641892a14b1a4f1ee Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 10:11:21 -0400 Subject: [PATCH 179/543] lxd/util/http/test: Update hostname s/linuxcontainers.org/example.com/ Signed-off-by: Simon Deziel --- lxd/util/http_test.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/lxd/util/http_test.go b/lxd/util/http_test.go index 2d1aff5eee5a..315d5a97d6ba 100644 --- a/lxd/util/http_test.go +++ b/lxd/util/http_test.go @@ -7,17 +7,17 @@ import ( func ExampleListenAddresses() { listenAddressConfigs := []string{ "", - "127.0.0.1:8000", // Valid IPv4 address with port. - "127.0.0.1", // Valid IPv4 address without port. - "[127.0.0.1]", // Valid wrapped IPv4 address without port. - "[::1]:8000", // Valid IPv6 address with port. - "::1:8000", // Valid IPv6 address without port (that might look like a port). - "::1", // Valid IPv6 address without port. - "[::1]", // Valid wrapped IPv6 address without port. - "linuxcontainers.org", // Valid hostname without port. - "linuxcontainers.org:8000", // Valid hostname with port. 
- "foo:8000:9000", // Invalid host and port combination. - ":::8000", // Invalid host and port combination. + "127.0.0.1:8000", // Valid IPv4 address with port. + "127.0.0.1", // Valid IPv4 address without port. + "[127.0.0.1]", // Valid wrapped IPv4 address without port. + "[::1]:8000", // Valid IPv6 address with port. + "::1:8000", // Valid IPv6 address without port (that might look like a port). + "::1", // Valid IPv6 address without port. + "[::1]", // Valid wrapped IPv6 address without port. + "example.com", // Valid hostname without port. + "example.com:8000", // Valid hostname with port. + "foo:8000:9000", // Invalid host and port combination. + ":::8000", // Invalid host and port combination. } for _, listlistenAddressConfig := range listenAddressConfigs { @@ -33,8 +33,8 @@ func ExampleListenAddresses() { // "::1:8000": [[::1:8000]:8443] // "::1": [[::1]:8443] // "[::1]": [[::1]:8443] - // "linuxcontainers.org": [linuxcontainers.org:8443] - // "linuxcontainers.org:8000": [linuxcontainers.org:8000] + // "example.com": [example.com:8443] + // "example.com:8000": [example.com:8000] // "foo:8000:9000": [] address foo:8000:9000: too many colons in address // ":::8000": [] address :::8000: too many colons in address } From d17393aee31a338a7a76620ad5ecbbd0ef27dd60 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 10:13:12 -0400 Subject: [PATCH 180/543] shared/api/url/test: Update hostname s/linuxcontainers.org/example.com/ Signed-off-by: Simon Deziel --- shared/api/url_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/shared/api/url_test.go b/shared/api/url_test.go index 5b90e2c30325..769bd7c73a7c 100644 --- a/shared/api/url_test.go +++ b/shared/api/url_test.go @@ -11,7 +11,7 @@ func ExampleURL() { fmt.Println(u.Project("project-with-%-in-it")) fmt.Println(u.Target("")) fmt.Println(u.Target("member-with-%-in-it")) - fmt.Println(u.Host("linuxcontainers.org")) + fmt.Println(u.Host("example.com")) 
fmt.Println(u.Scheme("https")) // Output: /1.0/networks/name-with-%252F-in-it @@ -19,6 +19,6 @@ func ExampleURL() { // /1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it // /1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it // /1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it&target=member-with-%25-in-it - // //linuxcontainers.org/1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it&target=member-with-%25-in-it - // https://linuxcontainers.org/1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it&target=member-with-%25-in-it + // //example.com/1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it&target=member-with-%25-in-it + // https://example.com/1.0/networks/name-with-%252F-in-it?project=project-with-%25-in-it&target=member-with-%25-in-it } From e3e90a1ff68e79cdf40dd805bcf7d404e606b5a4 Mon Sep 17 00:00:00 2001 From: Simon Deziel Date: Fri, 7 Jul 2023 10:14:22 -0400 Subject: [PATCH 181/543] shared/cert: replace org name by "LXD" Signed-off-by: Simon Deziel --- shared/cert.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/cert.go b/shared/cert.go index 0d04eb9c79df..9c68447c963c 100644 --- a/shared/cert.go +++ b/shared/cert.go @@ -341,7 +341,7 @@ func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) { template := x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ - Organization: []string{"linuxcontainers.org"}, + Organization: []string{"LXD"}, CommonName: fmt.Sprintf("%s@%s", username, hostname), }, NotBefore: validFrom, From 7214aa8d60ba3afc7b0b24bd324f4e2a46e4da29 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 7 Jul 2023 18:53:01 +0200 Subject: [PATCH 182/543] lxd: Check project permissions when importing from backup Fixes #11958 Signed-off-by: Thomas Hipp --- lxd/instances_post.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lxd/instances_post.go b/lxd/instances_post.go index 
165294f497bd..385d7ec4c9f6 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -625,6 +625,21 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data return response.BadRequest(err) } + // Check project permissions. + err = s.DB.Cluster.Transaction(s.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { + req := api.InstancesPost{ + InstancePut: bInfo.Config.Container.InstancePut, + Name: bInfo.Name, + Source: api.InstanceSource{}, // Only relevant for "copy" or "migration", but may not be nil. + Type: api.InstanceType(bInfo.Config.Container.Type), + } + + return project.AllowInstanceCreation(tx, projectName, req) + }) + if err != nil { + return response.SmartError(err) + } + bInfo.Project = projectName // Override pool. From ba27773d08e518cb4a9fb038acfc210ca3a72854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 10 Jul 2023 11:18:03 +0200 Subject: [PATCH 183/543] lxd/instance/drivers/qemu: Skip every other vsock syscall error except ENODEV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This handles a panic condition if the connection attempt to the vsock returns syscall errors other than ENODEV. In such situations the socket was never opened up and therefore cannot be closed with c.Close(). Use the happy path style for freeVsockID function. Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index ab8abeee38f7..6f3e3900ca5c 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -7286,11 +7286,14 @@ func (d *qemu) freeVsockID(vsockID uint32) bool { return false } - if unixErrno == unix.ENODEV { - // The syscall to the vsock device returned "no such device". - // This means the address (Context ID) is free. 
- return true + if unixErrno != unix.ENODEV { + // Skip the vsockID if another syscall error was encountered. + return false } + + // The syscall to the vsock device returned "no such device". + // This means the address (Context ID) is free. + return true } // Address is already in use. From 8929b52f5acf8e5b7ff85da00ac169a1e5bff523 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 10 Jul 2023 13:00:17 +0200 Subject: [PATCH 184/543] test: Check project limits when importing instances Signed-off-by: Thomas Hipp --- test/suites/projects.sh | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/test/suites/projects.sh b/test/suites/projects.sh index 0d77a5d935a4..a656cfddae77 100644 --- a/test/suites/projects.sh +++ b/test/suites/projects.sh @@ -769,6 +769,39 @@ test_projects_limits() { lxc project switch default lxc project delete p1 + # Start with clean project. + lxc project create p1 + lxc project switch p1 + + # Relaxing restricted.containers.lowlevel to 'allow' makes it possible set + # low-level keys. + lxc project set p1 restricted.containers.lowlevel allow + + # Add a root device to the default profile of the project and import an image. + pool="lxdtest-$(basename "${LXD_DIR}")" + lxc profile device add default root disk path="/" pool="${pool}" + + deps/import-busybox --project p1 --alias testimage + + # Create a couple of containers in the project. + lxc init testimage c1 -c limits.memory=1GiB + lxc init testimage c2 -c limits.memory=1GiB + + lxc export c1 + lxc delete c1 + + # Configure a valid project memory limit. + lxc project set p1 limits.memory 1GiB + + # Can't import the backup as it would exceed the 1GiB project memory limit. + ! 
lxc import c1.tar.gz || false + + rm c1.tar.gz + lxc delete c2 + lxc image delete testimage + lxc project switch default + lxc project delete p1 + if [ "${LXD_BACKEND}" = "dir" ] || [ "${LXD_BACKEND}" = "zfs" ]; then lxc remote remove l2 kill_lxd "$LXD_REMOTE_DIR" From efac802cf77bf427341712044dc8c8342836d107 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Mon, 10 Jul 2023 12:18:14 +0100 Subject: [PATCH 185/543] lxd/firewall/drivers: Optimize xtables network forward rule generation. Signed-off-by: Mark Laing --- lxd/firewall/drivers/drivers_xtables.go | 139 +++++++++++++----------- 1 file changed, 78 insertions(+), 61 deletions(-) diff --git a/lxd/firewall/drivers/drivers_xtables.go b/lxd/firewall/drivers/drivers_xtables.go index 69925ebdb9c6..eb1bb45b3059 100644 --- a/lxd/firewall/drivers/drivers_xtables.go +++ b/lxd/firewall/drivers/drivers_xtables.go @@ -1451,41 +1451,67 @@ func (d Xtables) iptablesChainDelete(ipVersion uint, table string, chain string, // NetworkApplyForwards apply network address forward rules to firewall. func (d Xtables) NetworkApplyForwards(networkName string, rules []AddressForward) error { + // Validate all rules first. 
+ for i, rule := range rules { + if rule.ListenAddress == nil { + return fmt.Errorf("Invalid rule %d, listen address is required", i) + } + + if rule.TargetAddress == nil { + return fmt.Errorf("Invalid rule %d, target address is required", i) + } + + listenPortLen := len(rule.ListenPorts) + if listenPortLen == 0 && rule.Protocol != "" { + return fmt.Errorf("Invalid rule %d, default target rule but non-empty protocol", i) + } + + targetPortLen := len(rule.TargetPorts) + if targetPortLen > 1 && targetPortLen != listenPortLen { + return fmt.Errorf("Invalid rule %d, mismatch between listen port(s) and target port(s) count", i) + } + } + comment := d.networkForwardIPTablesComment(networkName) + clearNetworkForwards := func() error { + for _, ipVersion := range []uint{4, 6} { + err := d.iptablesClear(ipVersion, []string{comment}, "nat") + if err != nil { + return err + } + } + + return nil + } + // Clear any forward rules associated to the network. - for _, ipVersion := range []uint{4, 6} { - err := d.iptablesClear(ipVersion, []string{comment}, "nat") + err := clearNetworkForwards() + if err != nil { + return err + } + + reverter := revert.New() + defer reverter.Fail() + + // Clear all network forwards if we fail, otherwise the forwards are only partially applied. + reverter.Add(func() { + err := clearNetworkForwards() if err != nil { - return err + logger.Error("Failed to clear firewall rules after failing to apply network forwards", logger.Ctx{"network_name": networkName, "error": err}) } - } + }) // Build up rules, ordering by default target rules first, followed by port specific listen rules. // This is so the generated firewall rules will apply the port specific rules first (they are prepended). 
for _, listenPortsOnly := range []bool{false, true} { - for ruleIndex, rule := range rules { - if rule.ListenAddress == nil { - return fmt.Errorf("Invalid rule %d, listen address is required", ruleIndex) - } - - if rule.TargetAddress == nil { - return fmt.Errorf("Invalid rule %d, target address is required", ruleIndex) - } - - listenPortsLen := len(rule.ListenPorts) - + for _, rule := range rules { // Process the rules in order of outer loop. + listenPortsLen := len(rule.ListenPorts) if (listenPortsOnly && listenPortsLen < 1) || (!listenPortsOnly && listenPortsLen > 0) { continue } - // If multiple target ports supplied, check they match the listen port(s) count. - targetPortsLen := len(rule.TargetPorts) - if targetPortsLen > 1 && targetPortsLen != listenPortsLen { - return fmt.Errorf("Invalid rule %d, mismatch between listen port(s) and target port(s) count", ruleIndex) - } - ipVersion := uint(4) if rule.ListenAddress.To4() == nil { ipVersion = 6 @@ -1494,58 +1520,50 @@ func (d Xtables) NetworkApplyForwards(networkName string, rules []AddressForward listenAddressStr := rule.ListenAddress.String() targetAddressStr := rule.TargetAddress.String() - if listenPortsLen > 0 { - for i := range rule.ListenPorts { - // Use the target port that corresponds to the listen port (unless only 1 - // is specified, in which case use same target port for all listen ports). - var targetPort uint64 - - switch { - case targetPortsLen <= 0: - // No target ports specified, use same port as listen port index. - targetPort = rule.ListenPorts[i] - case targetPortsLen == 1: - // Single target port specified, use that for all listen ports. - targetPort = rule.TargetPorts[0] - case targetPortsLen > 1: - // Multiple target ports specified, user port associated with - // listen port index. - targetPort = rule.TargetPorts[i] - } + if rule.Protocol != "" { + if len(rule.TargetPorts) == 0 { + rule.TargetPorts = rule.ListenPorts + } - // Format the destination host/port as appropriate. 
- targetDest := fmt.Sprintf("%s:%d", targetAddressStr, targetPort) - if ipVersion == 6 { - targetDest = fmt.Sprintf("[%s]:%d", targetAddressStr, targetPort) + targetPortRanges := portRangesFromSlice(rule.TargetPorts) + for _, targetPortRange := range targetPortRanges { + targetPortRangeStr := portRangeStr(targetPortRange, ":") + + // Apply MASQUERADE rule for each target range. + // instance <-> instance. + // Requires instance's bridge port has hairpin mode enabled when br_netfilter is loaded. + err := d.iptablesPrepend(ipVersion, comment, "nat", "POSTROUTING", "-p", rule.Protocol, "--source", targetAddressStr, "--destination", targetAddressStr, "--dport", targetPortRangeStr, "-j", "MASQUERADE") + if err != nil { + return err } + } + + dnatRanges := getOptimisedDNATRanges(&rule) + for listenPortRange, targetPortRange := range dnatRanges { + listenPortRangeStr := portRangeStr(listenPortRange, ":") + targetDest := targetAddressStr - listenPortStr := fmt.Sprintf("%d", rule.ListenPorts[i]) - targetPortStr := fmt.Sprintf("%d", targetPort) + if targetPortRange[1] == 1 { + targetPortStr := portRangeStr(targetPortRange, ":") + targetDest = fmt.Sprintf("%s:%s", targetAddressStr, targetPortStr) + if ipVersion == 6 { + targetDest = fmt.Sprintf("[%s]:%s", targetAddressStr, targetPortStr) + } + } // outbound <-> instance. - err := d.iptablesPrepend(ipVersion, comment, "nat", "PREROUTING", "-p", rule.Protocol, "--destination", listenAddressStr, "--dport", listenPortStr, "-j", "DNAT", "--to-destination", targetDest) + err := d.iptablesPrepend(ipVersion, comment, "nat", "PREROUTING", "-p", rule.Protocol, "--destination", listenAddressStr, "--dport", listenPortRangeStr, "-j", "DNAT", "--to-destination", targetDest) if err != nil { return err } // host <-> instance. 
- err = d.iptablesPrepend(ipVersion, comment, "nat", "OUTPUT", "-p", rule.Protocol, "--destination", listenAddressStr, "--dport", listenPortStr, "-j", "DNAT", "--to-destination", targetDest) + err = d.iptablesPrepend(ipVersion, comment, "nat", "OUTPUT", "-p", rule.Protocol, "--destination", listenAddressStr, "--dport", listenPortRangeStr, "-j", "DNAT", "--to-destination", targetDest) if err != nil { return err } - - // Only add >1 hairpin NAT rules if multiple target ports being used. - if i == 0 || targetPortsLen != 1 { - // instance <-> instance. - // Requires instance's bridge port has hairpin mode enabled when - // br_netfilter is loaded. - err = d.iptablesPrepend(ipVersion, comment, "nat", "POSTROUTING", "-p", rule.Protocol, "--source", targetAddressStr, "--destination", targetAddressStr, "--dport", targetPortStr, "-j", "MASQUERADE") - if err != nil { - return err - } - } } - } else if rule.Protocol == "" { + } else { // Format the destination host/port as appropriate. targetDest := targetAddressStr if ipVersion == 6 { @@ -1571,11 +1589,10 @@ func (d Xtables) NetworkApplyForwards(networkName string, rules []AddressForward if err != nil { return err } - } else { - return fmt.Errorf("Invalid rule %d, default target rule but non-empty protocol", ruleIndex) } } } + reverter.Success() return nil } From 12d2e3e2751ddf688f0d49689c0e916431fe248c Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Mon, 10 Jul 2023 12:34:14 +0100 Subject: [PATCH 186/543] lxd/firewall/drivers: Optimise nftables network forward rule generation. 
Signed-off-by: Mark Laing --- lxd/firewall/drivers/drivers_nftables.go | 89 ++++++++++++------------ 1 file changed, 43 insertions(+), 46 deletions(-) diff --git a/lxd/firewall/drivers/drivers_nftables.go b/lxd/firewall/drivers/drivers_nftables.go index bbd287f8a259..4fd9e4f726ba 100644 --- a/lxd/firewall/drivers/drivers_nftables.go +++ b/lxd/firewall/drivers/drivers_nftables.go @@ -929,6 +929,13 @@ func (d Nftables) NetworkApplyForwards(networkName string, rules []AddressForwar // This is so the generated firewall rules will apply the port specific rules first. for _, listenPortsOnly := range []bool{true, false} { for ruleIndex, rule := range rules { + // Process the rules in order of outer loop. + listenPortsLen := len(rule.ListenPorts) + if (listenPortsOnly && listenPortsLen < 1) || (!listenPortsOnly && listenPortsLen > 0) { + continue + } + + // Validate the rule. if rule.ListenAddress == nil { return fmt.Errorf("Invalid rule %d, listen address is required", ruleIndex) } @@ -937,16 +944,21 @@ func (d Nftables) NetworkApplyForwards(networkName string, rules []AddressForwar return fmt.Errorf("Invalid rule %d, target address is required", ruleIndex) } - listenPortsLen := len(rule.ListenPorts) - - // Process the rules in order of outer loop. - if (listenPortsOnly && listenPortsLen < 1) || (!listenPortsOnly && listenPortsLen > 0) { - continue + if listenPortsLen == 0 && rule.Protocol != "" { + return fmt.Errorf("Invalid rule %d, default target rule but non-empty protocol", ruleIndex) } - // If multiple target ports supplied, check they match the listen port(s) count. - targetPortsLen := len(rule.TargetPorts) - if targetPortsLen > 1 && targetPortsLen != listenPortsLen { + switch len(rule.TargetPorts) { + case 0: + // No target ports specified, use listen ports (only valid when protocol is specified). + rule.TargetPorts = rule.ListenPorts + case 1: + // Single target port specified, OK. + break + case len(rule.ListenPorts): + // One-to-one match with listen ports, OK. 
+ break + default: return fmt.Errorf("Invalid rule %d, mismatch between listen port(s) and target port(s) count", ruleIndex) } @@ -958,52 +970,39 @@ func (d Nftables) NetworkApplyForwards(networkName string, rules []AddressForwar listenAddressStr := rule.ListenAddress.String() targetAddressStr := rule.TargetAddress.String() - if listenPortsLen > 0 { - for i := range rule.ListenPorts { - // Use the target port that corresponds to the listen port (unless only 1 - // is specified, in which case use same target port for all listen ports). - var targetPort uint64 - - switch { - case targetPortsLen <= 0: - // No target ports specified, use same port as listen port index. - targetPort = rule.ListenPorts[i] - case targetPortsLen == 1: - // Single target port specified, use that for all listen ports. - targetPort = rule.TargetPorts[0] - case targetPortsLen > 1: - // Multiple target ports specified, user port associated with - // listen port index. - targetPort = rule.TargetPorts[i] - } + if rule.Protocol != "" { + targetPortRanges := portRangesFromSlice(rule.TargetPorts) + for _, targetPortRange := range targetPortRanges { + targetPortRangeStr := portRangeStr(targetPortRange, "-") + snatRules = append(snatRules, map[string]any{ + "ipFamily": ipFamily, + "protocol": rule.Protocol, + "targetHost": targetAddressStr, + "targetPorts": targetPortRangeStr, + }) + } - // Format the destination host/port as appropriate. 
- targetDest := fmt.Sprintf("%s:%d", targetAddressStr, targetPort) - if ipFamily == "ip6" { - targetDest = fmt.Sprintf("[%s]:%d", targetAddressStr, targetPort) + dnatRanges := getOptimisedDNATRanges(&rule) + for listenPortRange, targetPortRange := range dnatRanges { + // Format the destination host/port as appropriate + targetDest := targetAddressStr + if targetPortRange[1] == 1 { + targetPortStr := portRangeStr(targetPortRange, ":") + targetDest = fmt.Sprintf("%s:%s", targetAddressStr, targetPortStr) + if ipFamily == "ip6" { + targetDest = fmt.Sprintf("[%s]:%s", targetAddressStr, targetPortStr) + } } dnatRules = append(dnatRules, map[string]any{ "ipFamily": ipFamily, "protocol": rule.Protocol, "listenAddress": listenAddressStr, - "listenPorts": rule.ListenPorts[i], + "listenPorts": portRangeStr(listenPortRange, "-"), "targetDest": targetDest, - "targetHost": targetAddressStr, - "targetPorts": targetPort, }) - - // Only add >1 hairpin NAT rules if multiple target ports being used. - if i == 0 || targetPortsLen != 1 { - snatRules = append(snatRules, map[string]any{ - "ipFamily": ipFamily, - "protocol": rule.Protocol, - "targetHost": targetAddressStr, - "targetPorts": targetPort, - }) - } } - } else if rule.Protocol == "" { + } else { // Format the destination host/port as appropriate. targetDest := targetAddressStr if ipFamily == "ip6" { @@ -1021,8 +1020,6 @@ func (d Nftables) NetworkApplyForwards(networkName string, rules []AddressForwar "ipFamily": ipFamily, "targetHost": targetAddressStr, }) - } else { - return fmt.Errorf("Invalid rule %d, default target rule but non-empty protocol", ruleIndex) } } } From 02bcc16e49f5eac087700b8c3dc504a5e66ab4cf Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Mon, 10 Jul 2023 13:41:19 +0100 Subject: [PATCH 187/543] test/suites: Updates network forward suite for optimised xtables rules. 
Signed-off-by: Mark Laing --- test/suites/network_forward.sh | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/test/suites/network_forward.sh b/test/suites/network_forward.sh index d15768c74bdf..bc77061f2fdd 100644 --- a/test/suites/network_forward.sh +++ b/test/suites/network_forward.sh @@ -59,12 +59,9 @@ test_network_forward() { # Check can add a port with a listener range and no target port (so it uses same range for target ports). lxc network forward port add "${netName}" 198.51.100.1 tcp 80-81 192.0.2.3 if [ "$firewallDriver" = "xtables" ]; then - iptables -w -t nat -S | grep -- "-A PREROUTING -d 198.51.100.1/32 -p tcp -m tcp --dport 81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:81" - iptables -w -t nat -S | grep -- "-A PREROUTING -d 198.51.100.1/32 -p tcp -m tcp --dport 80 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:80" - iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:81" - iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 80 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:80" - iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" - iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 80 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" + iptables -w -t nat -S | grep -- "-A PREROUTING -d 198.51.100.1/32 -p tcp -m tcp --dport 80:81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3" + iptables -w -t nat -S 
| grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 80:81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3" + iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 80:81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" else nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 80 dnat ip to 192.0.2.3:80" nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 81 dnat ip to 192.0.2.3:81" @@ -101,9 +98,7 @@ test_network_forward() { iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 84 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:92" iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 83 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:91" iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 82 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3:90" - iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 92 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" - iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 91 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" - iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 90 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" + iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 90:92 -m comment --comment \"generated for LXD network-forward ${netName}\" 
-j MASQUERADE" else nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 82 dnat ip to 192.0.2.3:90" nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 83 dnat ip to 192.0.2.3:91" @@ -118,7 +113,7 @@ test_network_forward() { # Check deleting multiple rules is prevented without --force, and that it takes effect with --force. if [ "$firewallDriver" = "xtables" ]; then - [ "$(iptables -w -t nat -S | grep -c "generated for LXD network-forward ${netName}")" -eq 21 ] + [ "$(iptables -w -t nat -S | grep -c "generated for LXD network-forward ${netName}")" -eq 16 ] else [ "$(nft -nn list chain inet lxd "fwdprert.${netName}" | wc -l)" -eq 12 ] [ "$(nft -nn list chain inet lxd "fwdout.${netName}"| wc -l)" -eq 12 ] From 2c4bff6b63a854689ea0b2df9ce99e33591b99a8 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Mon, 10 Jul 2023 14:08:53 +0100 Subject: [PATCH 188/543] test/suites: Updates network forward suite for optimised nftables rules. 
Signed-off-by: Mark Laing --- test/suites/network_forward.sh | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/test/suites/network_forward.sh b/test/suites/network_forward.sh index bc77061f2fdd..a1d6e1fec2ab 100644 --- a/test/suites/network_forward.sh +++ b/test/suites/network_forward.sh @@ -63,12 +63,9 @@ test_network_forward() { iptables -w -t nat -S | grep -- "-A OUTPUT -d 198.51.100.1/32 -p tcp -m tcp --dport 80:81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j DNAT --to-destination 192.0.2.3" iptables -w -t nat -S | grep -- "-A POSTROUTING -s 192.0.2.3/32 -d 192.0.2.3/32 -p tcp -m tcp --dport 80:81 -m comment --comment \"generated for LXD network-forward ${netName}\" -j MASQUERADE" else - nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 80 dnat ip to 192.0.2.3:80" - nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 81 dnat ip to 192.0.2.3:81" - nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 80 dnat ip to 192.0.2.3:80" - nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 81 dnat ip to 192.0.2.3:81" - nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 80 masquerade" - nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 81 masquerade" + nft -nn list chain inet lxd "fwdprert.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 80-81 dnat ip to 192.0.2.3" + nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 80-81 dnat ip to 192.0.2.3" + nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 80-81 masquerade" fi # Check can't add port with duplicate listen port. 
@@ -106,18 +103,16 @@ test_network_forward() { nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 82 dnat ip to 192.0.2.3:90" nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 83 dnat ip to 192.0.2.3:91" nft -nn list chain inet lxd "fwdout.${netName}" | grep "ip daddr 198.51.100.1 tcp dport 84 dnat ip to 192.0.2.3:92" - nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 90 masquerade" - nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 91 masquerade" - nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 92 masquerade" + nft -nn list chain inet lxd "fwdpstrt.${netName}" | grep "ip saddr 192.0.2.3 ip daddr 192.0.2.3 tcp dport 90-92 masquerade" fi # Check deleting multiple rules is prevented without --force, and that it takes effect with --force. if [ "$firewallDriver" = "xtables" ]; then [ "$(iptables -w -t nat -S | grep -c "generated for LXD network-forward ${netName}")" -eq 16 ] else - [ "$(nft -nn list chain inet lxd "fwdprert.${netName}" | wc -l)" -eq 12 ] - [ "$(nft -nn list chain inet lxd "fwdout.${netName}"| wc -l)" -eq 12 ] - [ "$(nft -nn list chain inet lxd "fwdpstrt.${netName}" | wc -l)" -eq 12 ] + [ "$(nft -nn list chain inet lxd "fwdprert.${netName}" | wc -l)" -eq 11 ] + [ "$(nft -nn list chain inet lxd "fwdout.${netName}"| wc -l)" -eq 11 ] + [ "$(nft -nn list chain inet lxd "fwdpstrt.${netName}" | wc -l)" -eq 9 ] fi ! 
lxc network forward port remove "${netName}" 198.51.100.1 tcp || false From 2e2655eb5c1d5a30027db329076770a9ec647987 Mon Sep 17 00:00:00 2001 From: Bilal Khan Date: Tue, 11 Jul 2023 10:38:44 +0500 Subject: [PATCH 189/543] Added the headings in multiple functions of the config.go file Signed-off-by: Bilal Khan --- lxc/config.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lxc/config.go b/lxc/config.go index cf9af28004de..cbc84a3cc9b0 100644 --- a/lxc/config.go +++ b/lxc/config.go @@ -23,6 +23,8 @@ type cmdConfig struct { flagTarget string } +// Command creates a Cobra command for managing instance and server configurations, +// including options for device, edit, get, metadata, profile, set, show, template, trust, and unset. func (c *cmdConfig) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("config") @@ -85,6 +87,7 @@ type cmdConfigEdit struct { config *cmdConfig } +// Command creates a Cobra command to edit instance or server configurations using YAML, with optional flags for targeting cluster members. func (c *cmdConfigEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:][[/]]")) @@ -101,6 +104,7 @@ func (c *cmdConfigEdit) Command() *cobra.Command { return cmd } +// helpTemplate returns a sample YAML configuration and guidelines for editing instance configurations. func (c *cmdConfigEdit) helpTemplate() string { return i18n.G( `### This is a YAML representation of the configuration. @@ -122,6 +126,7 @@ func (c *cmdConfigEdit) helpTemplate() string { ### Note that the name is shown but cannot be changed`) } +// Run executes the config edit command, allowing users to edit instance or server configurations via an interactive YAML editor. func (c *cmdConfigEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 0, 1) @@ -365,6 +370,8 @@ type cmdConfigGet struct { flagExpanded bool } +// Command creates a Cobra command to fetch values for given instance or server configuration keys, +// with optional flags for expanded configuration and cluster targeting. func (c *cmdConfigGet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("get", i18n.G("[:][] ")) @@ -379,6 +386,7 @@ func (c *cmdConfigGet) Command() *cobra.Command { return cmd } +// Run fetches and prints the specified configuration key's value for an instance or server, also handling target and expansion flags. func (c *cmdConfigGet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) @@ -457,6 +465,7 @@ type cmdConfigSet struct { config *cmdConfig } +// Command creates a new Cobra command to set instance or server configuration keys and returns it. func (c *cmdConfigSet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("set", i18n.G("[:][] =...")) @@ -482,6 +491,7 @@ lxc config set core.trust_password=blah return cmd } +// Run executes the "set" command, updating instance or server configuration keys based on provided arguments. func (c *cmdConfigSet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, -1) @@ -619,6 +629,7 @@ type cmdConfigShow struct { flagExpanded bool } +// Command sets up the "show" command, which displays instance or server configurations based on the provided arguments. func (c *cmdConfigShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:][[/]]")) @@ -633,6 +644,7 @@ func (c *cmdConfigShow) Command() *cobra.Command { return cmd } +// Run executes the "show" command, displaying the YAML-formatted configuration of a specified server or instance. func (c *cmdConfigShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 0, 1) @@ -739,6 +751,7 @@ type cmdConfigUnset struct { configSet *cmdConfigSet } +// Command generates a new "unset" command to remove specific configuration keys for an instance or server. func (c *cmdConfigUnset) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("unset", i18n.G("[:][] ")) @@ -752,6 +765,7 @@ func (c *cmdConfigUnset) Command() *cobra.Command { return cmd } +// Run executes the "unset" command, delegating to the "set" command to remove specific configuration keys. func (c *cmdConfigUnset) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) From 70e047319b9d171e1e912923d834daba2ae47a52 Mon Sep 17 00:00:00 2001 From: Ruth Fuchss Date: Tue, 11 Jul 2023 10:46:46 +0200 Subject: [PATCH 190/543] doc/storage: clarify when to specify storage volume type Also change storage volume type `vm` to `virtual-machine`. Signed-off-by: Ruth Fuchss --- doc/explanation/storage.md | 2 +- doc/howto/storage_volumes.md | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/doc/explanation/storage.md b/doc/explanation/storage.md index 166784823310..16588d8bd0f0 100644 --- a/doc/explanation/storage.md +++ b/doc/explanation/storage.md @@ -112,7 +112,7 @@ See the following how-to guides for additional information: Storage volumes can be of the following types: -`container`/`vm` +`container`/`virtual-machine` : LXD automatically creates one of these storage volumes when you launch an instance. It is used as the root disk for the instance, and it is destroyed when the instance is deleted. 
diff --git a/doc/howto/storage_volumes.md b/doc/howto/storage_volumes.md index 3f3b4426f8db..11c7a11c609c 100644 --- a/doc/howto/storage_volumes.md +++ b/doc/howto/storage_volumes.md @@ -115,19 +115,21 @@ See the {ref}`storage-drivers` documentation for the available configuration opt Use the following command to set configuration options for a storage volume: - lxc storage volume set + lxc storage volume set [/] -For example, to set the snapshot expiry time to one month, use the following command: +The default {ref}`storage volume type ` is `custom`, so you can leave out the `/` when configuring a custom storage volume. - lxc storage volume set my-pool my-volume snapshots.expiry 1M +For example, to set the size of your custom storage volume `my-volume` to 1 GiB, use the following command: -To configure an instance storage volume, specify the volume name including the {ref}`storage volume type `, for example: + lxc storage volume set my-pool my-volume size=1GiB - lxc storage volume set my-pool container/my-container-volume user.XXX value +To set the snapshot expiry time for your virtual machine `my-vm` to one month, use the following command: + + lxc storage volume set my-pool virtual-machine/my-vm snapshots.expiry 1M You can also edit the storage volume configuration by using the following command: - lxc storage volume edit + lxc storage volume edit [/] (storage-configure-vol-default)= ### Configure default values for storage volumes @@ -136,7 +138,7 @@ You can define default volume configurations for a storage pool. To do so, set a storage pool configuration with a `volume` prefix, thus `volume.=`. This value is then used for all new storage volumes in the pool, unless it is set explicitly for a volume or an instance. 
-In general, the defaults set on a storage pool level (before the volume was created) can be overridden through the volume configuration, and the volume configuration can be overridden through the instance configuration (for storage volumes of {ref}`type ` `container` or `vm`). +In general, the defaults set on a storage pool level (before the volume was created) can be overridden through the volume configuration, and the volume configuration can be overridden through the instance configuration (for storage volumes of {ref}`type ` `container` or `virtual-machine`). For example, to set a default volume size for a storage pool, use the following command: @@ -159,13 +161,15 @@ Custom storage volumes might use the same name as instance volumes (for example, Therefore, to distinguish between instance storage volumes and custom storage volumes, all instance storage volumes must be referred to as `/` (for example, `container/c1` or `virtual-machine/vm`) in commands. ``` -To show detailed information about a specific custom volume, use the following command: +To show detailed configuration information about a specific volume, use the following command: + + lxc storage volume show [/] - lxc storage volume show +To show state information about a specific volume, use the following command: -To show detailed information about a specific instance volume, use the following command: + lxc storage volume info [/] - lxc storage volume show / +In both commands, the default {ref}`storage volume type ` is `custom`, so you can leave out the `/` when displaying information about a custom storage volume. 
## Resize a storage volume

From dc924968f961f21aa18eae9d32d5886a21cf139a Mon Sep 17 00:00:00 2001
From: Thomas Parrott
Date: Tue, 11 Jul 2023 14:47:04 +0100
Subject: [PATCH 191/543] lxd/migrate: Fix goroutine leak in disconnect if client never connects

Signed-off-by: Thomas Parrott
---
 lxd/migrate.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lxd/migrate.go b/lxd/migrate.go
index 69c43b3a088a..29e0c918be0e 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -78,7 +78,9 @@ func (c *migrationFields) recv(m proto.Message) error {
 
 func (c *migrationFields) disconnect() {
 	c.controlLock.Lock()
-	conn, _ := c.conns[api.SecretNameControl].WebSocket(context.TODO())
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+	defer cancel()
+	conn, _ := c.conns[api.SecretNameControl].WebSocket(ctx)
 	if conn != nil {
 		closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
 		_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))

From 02cf3196581394c8b9e44f0ca4ad27a4cb47b2a6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott
Date: Tue, 11 Jul 2023 14:47:26 +0100
Subject: [PATCH 192/543] shared/subprocess/proc: Fix goroutine leak in start

We should not call cmd.Process.Wait() as otherwise there is a goroutine leak as the error channel collected in cmd.Wait() isn't collected.
Signed-off-by: Thomas Parrott --- shared/subprocess/proc.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/shared/subprocess/proc.go b/shared/subprocess/proc.go index 270da19a5660..a9fb05ee842c 100644 --- a/shared/subprocess/proc.go +++ b/shared/subprocess/proc.go @@ -173,16 +173,20 @@ func (p *Process) start(ctx context.Context, fds []*os.File) error { go func() { defer close(p.chExit) - procstate, err := cmd.Process.Wait() - if err != nil { + err := cmd.Wait() + + if cmd.ProcessState != nil { + p.exitCode = int64(cmd.ProcessState.ExitCode()) + } else { p.exitCode = -1 + } + + if err != nil { p.exitErr = err return } - exitcode := int64(procstate.ExitCode()) - p.exitCode = exitcode if p.exitCode != 0 { p.exitErr = fmt.Errorf("Process exited with non-zero value %d", p.exitCode) } From 7577bb6cc4774ac369c96f51a36d6ae288975233 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 23:19:29 +0100 Subject: [PATCH 193/543] test: Cleanup instances at end of metrics test Signed-off-by: Thomas Parrott --- test/suites/metrics.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/suites/metrics.sh b/test/suites/metrics.sh index 43f7fc7f17e8..7782d4e7faa1 100644 --- a/test/suites/metrics.sh +++ b/test/suites/metrics.sh @@ -48,4 +48,6 @@ test_metrics() { ! curl -k -s -X GET "https://${metrics_addr}/1.0/metrics" | grep "name=\"c1\"" || false lxc config set core.metrics_authentication=false curl -k -s -X GET "https://${metrics_addr}/1.0/metrics" | grep "name=\"c1\"" + + lxc delete -f c1 c2 } From 5ac576044fdd745b4388f4b563790ad4c7b7708d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 11 Jul 2023 09:12:49 +0200 Subject: [PATCH 194/543] lxd/instance: Prevent MAC/address conflict for imported instances MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Imported instances have the same set of addresses as their parent. 
Starting those duplicates should produce a warning. Signed-off-by: Julian Pelizäus --- lxd/instance/instance_utils.go | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/lxd/instance/instance_utils.go b/lxd/instance/instance_utils.go index b74fd232ae74..343ca72dcd0a 100644 --- a/lxd/instance/instance_utils.go +++ b/lxd/instance/instance_utils.go @@ -1050,6 +1050,11 @@ func NextSnapshotName(s *state.State, inst Instance, defaultPattern string) (str return pattern, nil } +// temporaryName concatenates the move prefix and instUUID for a temporary instance. +func temporaryName(instUUID string) string { + return fmt.Sprintf("lxd-move-of-%s", instUUID) +} + // MoveTemporaryName returns a name derived from the instance's volatile.uuid, to use when moving an instance // across pools or cluster members which can be used for the naming the temporary copy before deleting the original // instance and renaming the copy to the original name. @@ -1064,7 +1069,7 @@ func MoveTemporaryName(inst Instance) (string, error) { } } - return fmt.Sprintf("lxd-move-of-%s", instUUID), nil + return temporaryName(instUUID), nil } // IsSameLogicalInstance returns true if the supplied Instance and db.Instance have the same project and name or @@ -1075,11 +1080,23 @@ func IsSameLogicalInstance(inst Instance, dbInst *db.InstanceArgs) bool { return true } - // Instance UUID is expected to be globally unique (which then allows for the *temporary* existence of - // duplicate instances of different names with the same volatile.uuid in order to accommodate moving - // instances between projects and storage pools without triggering duplicate resource errors). + // Don't trigger duplicate resource errors for temporary copies. if dbInst.Config["volatile.uuid"] == inst.LocalConfig()["volatile.uuid"] { - return true + // Accommodate moving instances between storage pools. + // Check temporary copy against source. 
+ if dbInst.Name == temporaryName(inst.LocalConfig()["volatile.uuid"]) { + return true + } + + // Check source against temporary copy. + if inst.Name() == temporaryName(dbInst.Config["volatile.uuid"]) { + return true + } + + // Accommodate moving instances between projects. + if dbInst.Project != inst.Project().Name { + return true + } } return false From c8a61af714ae9f13ee05436d45e9f0c7ae9fec23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 11 Jul 2023 10:07:28 +0200 Subject: [PATCH 195/543] tests: Add container export/import address conflict test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- test/suites/container_devices_nic_bridged.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/suites/container_devices_nic_bridged.sh b/test/suites/container_devices_nic_bridged.sh index 4274fe8d8de0..0d217522f4cd 100644 --- a/test/suites/container_devices_nic_bridged.sh +++ b/test/suites/container_devices_nic_bridged.sh @@ -616,6 +616,18 @@ test_container_devices_nic_bridged() { grep -F "192.0.2.232" "${LXD_DIR}/networks/${brName}/dnsmasq.hosts/foo.eth0" lxc delete -f foo + # Test container without extra network configuration can be restored from backup. + lxc init testimage foo -p "${ctName}" + lxc export foo foo.tar.gz + lxc import foo.tar.gz foo2 + rm foo.tar.gz + lxc profile assign foo2 "${ctName}" + + # Test container start will fail due to volatile MAC conflict. + lxc config get foo volatile.eth0.hwaddr | grep -Fx "$(lxc config get foo2 volatile.eth0.hwaddr)" + ! lxc start foo2 || false + lxc delete -f foo foo2 + # Check we haven't left any NICS lying around. 
endNicCount=$(find /sys/class/net | wc -l) if [ "$startNicCount" != "$endNicCount" ]; then From 65e616962471603b1fe08303b0fda2c12da96e09 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 11 Jul 2023 11:14:10 +0200 Subject: [PATCH 196/543] lxd/storage/drivers/zfs: Fix content type detection for custom block volumes This fixes an issue where custom block volumes would be listed as datasets instead of volumes. Fixes #11984 Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_volumes.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 2790df61e400..4b221701ac4c 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -1842,16 +1842,19 @@ func (d *zfs) ListVolumes() ([]Volume, error) { continue // Ignore unrecognised volume. } - // Detect if a volume is block content type using both the defined suffix and the dataset type. - isBlock := strings.HasSuffix(volName, zfsBlockVolSuffix) && zfsContentType == "volume" + // Detect if a volume is block content type using only the dataset type. + isBlock := zfsContentType == "volume" if volType == VolumeTypeVM && !isBlock { continue // Ignore VM filesystem volumes as we will just return the VM's block volume. 
} contentType := ContentTypeFS - if volType == VolumeTypeVM || isBlock { + if isBlock { contentType = ContentTypeBlock + } + + if volType == VolumeTypeVM || isBlock { volName = strings.TrimSuffix(volName, zfsBlockVolSuffix) } @@ -1863,7 +1866,7 @@ func (d *zfs) ListVolumes() ([]Volume, error) { if !foundExisting || (existingVol.Type() == VolumeTypeImage && existingVol.ContentType() == ContentTypeFS) { v := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config) - if zfsContentType == "volume" { + if isBlock { v.SetMountFilesystemProbe(true) } From a9fbddb44b26cc2106ede8405251a0c44ac363a0 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 11 Jul 2023 15:20:39 +0200 Subject: [PATCH 197/543] test: Test recovering custom block volumes Signed-off-by: Thomas Hipp --- test/main.sh | 1 + test/suites/backup.sh | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/test/main.sh b/test/main.sh index c8d4c67c5b83..99424dbe0d88 100755 --- a/test/main.sh +++ b/test/main.sh @@ -336,6 +336,7 @@ if [ "${1:-"all"}" != "cluster" ]; then run_test test_filtering "API filtering" run_test test_warnings "Warnings" run_test test_metrics "Metrics" + run_test test_storage_volume_recover "Recover storage volumes" fi # shellcheck disable=SC2034 diff --git a/test/suites/backup.sh b/test/suites/backup.sh index 8bd993f532a2..8a339c6f775e 100644 --- a/test/suites/backup.sh +++ b/test/suites/backup.sh @@ -1,3 +1,34 @@ +test_storage_volume_recover() { + LXD_IMPORT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_IMPORT_DIR}" + spawn_lxd "${LXD_IMPORT_DIR}" true + + poolName=$(lxc profile device get default root pool) + + # Create custom block volume. + lxc storage volume create "${poolName}" vol1 --type=block + + # Delete database entry of the created custom block volume. + lxd sql global "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='vol1'" + + # Ensure the custom block volume is no longer listed. + ! 
lxc storage volume show "${poolName}" vol1 || false + + # Recover custom block volume. + cat < Date: Wed, 12 Jul 2023 16:21:24 +0200 Subject: [PATCH 198/543] lxd/instance/driver/qemu: Use happy path style MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 6f3e3900ca5c..b0a07ecba6a6 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -7260,20 +7260,20 @@ func (d *qemu) reservedVsockID(vsockID uint32) bool { // getVsockID returns the vsock Context ID for the VM. func (d *qemu) getVsockID() (uint32, error) { existingVsockID, ok := d.localConfig["volatile.vsock_id"] - if ok { - vsockID, err := strconv.ParseUint(existingVsockID, 10, 32) - if err != nil { - return 0, fmt.Errorf("Failed to parse volatile.vsock_id: %q: %w", existingVsockID, err) - } + if !ok { + return 0, fmt.Errorf("Context ID not set in volatile.vsock_id") + } - if d.reservedVsockID(uint32(vsockID)) { - return 0, fmt.Errorf("Failed to use reserved vsock Context ID: %q", vsockID) - } + vsockID, err := strconv.ParseUint(existingVsockID, 10, 32) + if err != nil { + return 0, fmt.Errorf("Failed to parse volatile.vsock_id: %q: %w", existingVsockID, err) + } - return uint32(vsockID), nil + if d.reservedVsockID(uint32(vsockID)) { + return 0, fmt.Errorf("Failed to use reserved vsock Context ID: %q", vsockID) } - return 0, fmt.Errorf("Context ID not set in volatile.vsock_id") + return uint32(vsockID), nil } // freeVsockID returns true if the given vsockID is not yet acquired. 
From 7e3d98e66950423aef8c4e5f29bf558df1cc93c9 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 22:36:30 +0100 Subject: [PATCH 199/543] shared/ws/mirror: Remove unused MirrorWithHooks and use MirrorRead and MirrorWrite in Mirror function Signed-off-by: Thomas Parrott --- shared/ws/mirror.go | 61 ++------------------------------------------- 1 file changed, 2 insertions(+), 59 deletions(-) diff --git a/shared/ws/mirror.go b/shared/ws/mirror.go index 3aa96b235bb4..e2449341c1cd 100644 --- a/shared/ws/mirror.go +++ b/shared/ws/mirror.go @@ -12,65 +12,8 @@ import ( // Mirror takes a websocket and replicates all read/write to a ReadWriteCloser. // Returns channels indicating when reads and writes are finished (respectively). func Mirror(ctx context.Context, conn *websocket.Conn, rwc io.ReadWriteCloser) (chan struct{}, chan struct{}) { - return MirrorWithHooks(ctx, conn, rwc, nil, nil) -} - -// MirrorWithHooks is identical to Mirror but allows for code to be run at the end of the read or write operations. -// Returns channels indicating when reads and writes are finished (respectively). -func MirrorWithHooks(ctx context.Context, conn *websocket.Conn, rwc io.ReadWriteCloser, hookRead func(conn *websocket.Conn), hookWrite func(conn *websocket.Conn)) (chan struct{}, chan struct{}) { - logger.Debug("Websocket: Started mirror", logger.Ctx{"address": conn.RemoteAddr().String()}) - - chRead := make(chan struct{}, 1) - chWrite := make(chan struct{}, 1) - chDone := make(chan struct{}, 1) - - connRWC := NewWrapper(conn) - - go func() { - _, _ = io.Copy(rwc, connRWC) - defer close(chWrite) - - // Call the hook. - if hookRead != nil { - hookRead(conn) - } - }() - - go func() { - _, _ = io.Copy(connRWC, rwc) - defer close(chRead) - - // Call the hook. - if hookWrite != nil { - hookWrite(conn) - } - - // Send write barrier. - connRWC.Close() - }() - - go func() { - <-chRead - <-chWrite - close(chDone) - - // Send close message. 
- closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") - _ = conn.WriteMessage(websocket.CloseMessage, closeMsg) - - logger.Debug("Websocket: Stopped mirror", logger.Ctx{"address": conn.RemoteAddr().String()}) - }() - - go func() { - // Handle cancelation. - select { - case <-ctx.Done(): - case <-chDone: - } - - // Close the ReadWriteCloser on termination. - rwc.Close() - }() + chRead := MirrorRead(ctx, conn, rwc) + chWrite := MirrorWrite(ctx, conn, rwc) return chRead, chWrite } From 72c54a75de00669494c985aa51f1b3e9dcdfe7ea Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 10:42:36 +0100 Subject: [PATCH 200/543] client/lxd/instances: Don't wait for remote viewer to finish when LXD connection finishes in ConsoleInstanceDynamic Signed-off-by: Thomas Parrott --- client/lxd_instances.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 26b95272021e..18b1a85441ee 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -2413,8 +2413,7 @@ func (r *ProtocolLXD) ConsoleInstanceDynamic(instanceName string, console api.In } // Attach reader/writer. - readDone, writeDone := ws.Mirror(context.Background(), conn, rwc) - <-readDone + _, writeDone := ws.Mirror(context.Background(), conn, rwc) <-writeDone _ = conn.Close() From 0bc8a2267a0df9372d266a1e82e64f5443ae4f55 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 10:43:43 +0100 Subject: [PATCH 201/543] lxd/instance/console: Improve logging in connectVGA Signed-off-by: Thomas Parrott --- lxd/instance_console.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lxd/instance_console.go b/lxd/instance_console.go index 676d137d3d1d..4a388289e603 100644 --- a/lxd/instance_console.go +++ b/lxd/instance_console.go @@ -165,13 +165,15 @@ func (s *consoleWs) connectVGA(op *operations.Operation, r *http.Request, w http // Mirror the console and websocket. 
go func() { - defer logger.Debug("Finished mirroring websocket to console") + l := logger.AddContext(logger.Ctx{"address": conn.RemoteAddr().String()}) - logger.Debug("Started mirroring websocket") + defer l.Debug("Finished mirroring websocket to console") + + l.Debug("Started mirroring websocket") readDone, writeDone := ws.Mirror(context.Background(), conn, console) <-readDone - logger.Debugf("Finished mirroring console to websocket") + l.Debug("Finished mirroring console to websocket") <-writeDone }() From 54f742b126b817a866bebd4125ce8e94dc297ebc Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 10:44:11 +0100 Subject: [PATCH 202/543] lxd/instance/console: Improve logging in doConsole Signed-off-by: Thomas Parrott --- lxd/instance_console.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lxd/instance_console.go b/lxd/instance_console.go index 4a388289e603..133cb172f01a 100644 --- a/lxd/instance_console.go +++ b/lxd/instance_console.go @@ -280,16 +280,18 @@ func (s *consoleWs) doConsole(op *operations.Operation) error { // Mirror the console and websocket. 
mirrorDoneCh := make(chan struct{}) go func() { - defer logger.Debug("Finished mirroring websocket to console") s.connsLock.Lock() conn := s.conns[0] s.connsLock.Unlock() - logger.Debug("Started mirroring websocket") + l := logger.AddContext(logger.Ctx{"address": conn.RemoteAddr().String()}) + defer l.Debug("Finished mirroring websocket to console") + + l.Debug("Started mirroring websocket") readDone, writeDone := ws.Mirror(context.Background(), conn, console) <-readDone - logger.Debug("Finished mirroring console to websocket") + l.Debug("Finished mirroring console to websocket") <-writeDone close(mirrorDoneCh) }() From 39a98cf8f62b520fc50f7fadf760437cc99f5d34 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 10:41:26 +0100 Subject: [PATCH 203/543] client/lxd: Use ws.Mirror in ConsoleContainer functions Signed-off-by: Thomas Parrott --- client/lxd_containers.go | 4 ++-- client/lxd_instances.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/client/lxd_containers.go b/client/lxd_containers.go index 1e94447ffcef..1bbf80efeefc 100644 --- a/client/lxd_containers.go +++ b/client/lxd_containers.go @@ -1578,8 +1578,8 @@ func (r *ProtocolLXD) ConsoleContainer(containerName string, console api.Contain // And attach stdin and stdout to it go func() { - ws.MirrorRead(context.Background(), conn, args.Terminal) - <-ws.MirrorWrite(context.Background(), conn, args.Terminal) + _, writeDone := ws.Mirror(context.Background(), conn, args.Terminal) + <-writeDone _ = conn.Close() }() diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 18b1a85441ee..c6515de1223f 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -2326,8 +2326,8 @@ func (r *ProtocolLXD) ConsoleInstance(instanceName string, console api.InstanceC // And attach stdin and stdout to it go func() { - ws.MirrorRead(context.Background(), conn, args.Terminal) - <-ws.MirrorWrite(context.Background(), conn, args.Terminal) + _, writeDone := 
ws.Mirror(context.Background(), conn, args.Terminal) + <-writeDone _ = conn.Close() }() From 736ea6a688f23d91a1a2b9059de22587af00ff30 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Tue, 11 Jul 2023 16:39:01 +0100 Subject: [PATCH 204/543] test: Reset rsync compression mode in migration Signed-off-by: Thomas Parrott --- test/suites/migration.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/suites/migration.sh b/test/suites/migration.sh index 7c3b237adc0f..322d4f45406a 100644 --- a/test/suites/migration.sh +++ b/test/suites/migration.sh @@ -431,6 +431,7 @@ migration() { lxc_remote storage volume copy l1:"$remote_pool1"/foo l2:"$remote_pool2"/bar lxc_remote storage volume delete l1:"$remote_pool1" foo lxc_remote storage volume delete l2:"$remote_pool2" bar + lxc_remote storage unset l1:"$remote_pool1" rsync.compression # Test some migration between projects lxc_remote project create l1:proj -c features.images=false -c features.profiles=false From 274caddd41c3a6a42ae05ad940fda3a43de19801 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Tue, 11 Jul 2023 16:39:18 +0100 Subject: [PATCH 205/543] test: Force delete instance in migration Signed-off-by: Thomas Parrott --- test/suites/migration.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/suites/migration.sh b/test/suites/migration.sh index 322d4f45406a..b0462642f209 100644 --- a/test/suites/migration.sh +++ b/test/suites/migration.sh @@ -460,7 +460,7 @@ migration() { lxc_remote copy l2:c1 l1: lxc_remote start l1:c1 lxc_remote delete l1:c1 -f - lxc_remote delete l2:c1 + lxc_remote delete l2:c1 -f lxc_remote project switch l1:default lxc_remote project delete l1:proj From ed8859ef211f7bf982b321627076549b7b0272be Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 16:57:04 +0100 Subject: [PATCH 206/543] test: Fix lxc-to-lxd tests Signed-off-by: Thomas Parrott --- test/suites/lxc-to-lxd.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/test/suites/lxc-to-lxd.sh b/test/suites/lxc-to-lxd.sh index a0431b623428..65aeb4545ddf 100644 --- a/test/suites/lxc-to-lxd.sh +++ b/test/suites/lxc-to-lxd.sh @@ -5,8 +5,14 @@ test_lxc_to_lxd() { mkdir -p "${LXC_DIR}" + lxc network create lxcbr0 + # Create LXC containers lxc-create -P "${LXC_DIR}" -n c1 -B dir -t busybox + lxc-start -P "${LXC_DIR}" -n c1 + lxc-attach -P "${LXC_DIR}" -n c1 -- touch /root/foo + lxc-stop -P "${LXC_DIR}" -n c1 --kill + lxc-create -P "${LXC_DIR}" -n c2 -B dir -t busybox lxc-create -P "${LXC_DIR}" -n c3 -B dir -t busybox @@ -34,6 +40,7 @@ test_lxc_to_lxd() { # Ensure the converted container is startable lxc start c1 + lxc exec c1 -- stat /root/foo lxc delete -f c1 # Convert some LXC containers @@ -57,4 +64,7 @@ test_lxc_to_lxd() { lxc info c1 lxc info c2 lxc info c3 + + lxc delete -f c1 c2 c3 + lxc network delete lxcbr0 } From c71a20946117033ebf3af6d23b44de112b8736a2 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 22:46:12 +0100 Subject: [PATCH 207/543] test: Enable lxd-to-lxd test Signed-off-by: Thomas Parrott --- test/main.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/main.sh b/test/main.sh index 99424dbe0d88..73cf588e0145 100755 --- a/test/main.sh +++ b/test/main.sh @@ -302,6 +302,7 @@ if [ "${1:-"all"}" != "cluster" ]; then run_test test_devlxd "/dev/lxd" run_test test_fuidshift "fuidshift" run_test test_migration "migration" + run_test test_lxc_to_lxd "LXC to LXD" run_test test_fdleak "fd leak" run_test test_storage "storage" run_test test_storage_volume_snapshots "storage volume snapshots" From 6cb499c941601f57521c352b68de2fd1044c06b4 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 10:57:18 +0100 Subject: [PATCH 208/543] lxc-to-lxd/transfer: When remote websocket reaches EOF close connection to rsync Fixes hang. 
Signed-off-by: Thomas Parrott --- lxc-to-lxd/transfer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lxc-to-lxd/transfer.go b/lxc-to-lxd/transfer.go index 6836ea0f0544..11803172359c 100644 --- a/lxc-to-lxd/transfer.go +++ b/lxc-to-lxd/transfer.go @@ -30,6 +30,8 @@ func rsyncSend(conn *websocket.Conn, path string, rsyncArgs string) error { } readDone, writeDone := ws.Mirror(context.Background(), conn, dataSocket) + <-writeDone + _ = dataSocket.Close() output, err := io.ReadAll(stderr) if err != nil { @@ -40,7 +42,6 @@ func rsyncSend(conn *websocket.Conn, path string, rsyncArgs string) error { err = cmd.Wait() <-readDone - <-writeDone if err != nil { return fmt.Errorf("Failed to rsync: %v\n%s", err, output) From 0047540e61999da246fea51f722741772cf7b04b Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 15:37:33 +0100 Subject: [PATCH 209/543] lxc-to-lxd/utils: Use abort function earlier on Signed-off-by: Thomas Parrott --- lxc-to-lxd/utils.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/lxc-to-lxd/utils.go b/lxc-to-lxd/utils.go index e0859da499fc..123a8707f5c6 100644 --- a/lxc-to-lxd/utils.go +++ b/lxc-to-lxd/utils.go @@ -17,9 +17,14 @@ func transferRootfs(dst lxd.ContainerServer, op lxd.Operation, rootfs string, rs return err } + abort := func(err error) error { + protoSendError(wsControl, err) + return err + } + wsFs, err := op.GetWebsocket(opAPI.Metadata[api.SecretNameFilesystem].(string)) if err != nil { - return err + return abort(err) } // Setup control struct @@ -36,22 +41,15 @@ func transferRootfs(dst lxd.ContainerServer, op lxd.Operation, rootfs string, rs err = migration.ProtoSend(wsControl, &header) if err != nil { - protoSendError(wsControl, err) - return err + return abort(err) } err = migration.ProtoRecv(wsControl, &header) if err != nil { - protoSendError(wsControl, err) - return err + return abort(err) } // Send the filesystem - abort := func(err error) error { - 
protoSendError(wsControl, err) - return err - } - err = rsyncSend(wsFs, rootfs, rsyncArgs) if err != nil { return abort(err) From f1d58c8ea012be365c76ce2ed63f85fde03d0d87 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 15:37:59 +0100 Subject: [PATCH 210/543] lxc-to-lxd/utils: Detect and fail if negotiated rsync features don't match Signed-off-by: Thomas Parrott --- lxc-to-lxd/utils.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lxc-to-lxd/utils.go b/lxc-to-lxd/utils.go index 123a8707f5c6..94d620b4923d 100644 --- a/lxc-to-lxd/utils.go +++ b/lxc-to-lxd/utils.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "reflect" "github.com/canonical/lxd/client" "github.com/canonical/lxd/lxd/migration" @@ -30,7 +31,7 @@ func transferRootfs(dst lxd.ContainerServer, op lxd.Operation, rootfs string, rs // Setup control struct fs := migration.MigrationFSType_RSYNC rsyncHasFeature := true - header := migration.MigrationHeader{ + offerHeader := migration.MigrationHeader{ Fs: &fs, RsyncFeatures: &migration.RsyncFeatures{ Xattrs: &rsyncHasFeature, @@ -39,16 +40,24 @@ func transferRootfs(dst lxd.ContainerServer, op lxd.Operation, rootfs string, rs }, } - err = migration.ProtoSend(wsControl, &header) + err = migration.ProtoSend(wsControl, &offerHeader) if err != nil { return abort(err) } - err = migration.ProtoRecv(wsControl, &header) + var respHeader migration.MigrationHeader + err = migration.ProtoRecv(wsControl, &respHeader) if err != nil { return abort(err) } + rsyncFeaturesOffered := offerHeader.GetRsyncFeaturesSlice() + rsyncFeaturesResponse := respHeader.GetRsyncFeaturesSlice() + + if !reflect.DeepEqual(rsyncFeaturesOffered, rsyncFeaturesResponse) { + return abort(fmt.Errorf("Offered rsync features (%v) differ from those in the migration response (%v)", rsyncFeaturesOffered, rsyncFeaturesResponse)) + } + // Send the filesystem err = rsyncSend(wsFs, rootfs, rsyncArgs) if err != nil { From 
dd5806406dbfd210c35eb21d080871acbe2bedff Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 15:21:48 +0100 Subject: [PATCH 211/543] lxd/storage/drivers/generic/vfs: Improve logging in genericVFSCreateVolumeFromMigration Signed-off-by: Thomas Parrott --- lxd/storage/drivers/generic_vfs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/drivers/generic_vfs.go b/lxd/storage/drivers/generic_vfs.go index d71e36ae019c..d4e55c4afeae 100644 --- a/lxd/storage/drivers/generic_vfs.go +++ b/lxd/storage/drivers/generic_vfs.go @@ -306,7 +306,7 @@ func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) ( wrapper = migration.ProgressTracker(op, "fs_progress", volName) } - d.Logger().Debug("Receiving filesystem volume started", logger.Ctx{"volName": volName, "path": path}) + d.Logger().Debug("Receiving filesystem volume started", logger.Ctx{"volName": volName, "path": path, "features": volTargetArgs.MigrationType.Features}) defer d.Logger().Debug("Receiving filesystem volume stopped", logger.Ctx{"volName": volName, "path": path}) return rsync.Recv(path, conn, wrapper, volTargetArgs.MigrationType.Features) From ed447910d9c9c2e7bde51ef9218b5e61514fc8e2 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Wed, 12 Jul 2023 15:22:08 +0100 Subject: [PATCH 212/543] lxd/storage/drivers/driver/ceph: Update MigrationTypes to support rsync xattrs Signed-off-by: Thomas Parrott --- lxd/storage/drivers/driver_ceph.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_ceph.go b/lxd/storage/drivers/driver_ceph.go index 8fac13800882..3c874cb7fc40 100644 --- a/lxd/storage/drivers/driver_ceph.go +++ b/lxd/storage/drivers/driver_ceph.go @@ -384,9 +384,9 @@ func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapsho // Do not pass compression argument to rsync if the associated // config key, that is rsync.compression, is set to false. 
if shared.IsFalse(d.Config()["rsync.compression"]) { - rsyncFeatures = []string{"delete", "bidirectional"} + rsyncFeatures = []string{"xattrs", "delete", "bidirectional"} } else { - rsyncFeatures = []string{"delete", "compress", "bidirectional"} + rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"} } if refresh { From 52cf8a57db4a81503f5dbd30aac1c0cee09eb9f3 Mon Sep 17 00:00:00 2001 From: Max Asnaashari Date: Thu, 13 Jul 2023 00:09:57 +0000 Subject: [PATCH 213/543] lxd/cluster/membership: Apply MemberConfig to storage config table There are some target-specific storage pool keys (i.e. source) that are applied to the driver, but not saved to the database when specified in a joining cluster member's MemberConfig data. This commit ensures all node-specific keys supplied in MemberConfig are applied to the database appropriately. Signed-off-by: Max Asnaashari --- lxd/cluster/membership.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go index 98d8fe5ef38b..0373aae61e49 100644 --- a/lxd/cluster/membership.go +++ b/lxd/cluster/membership.go @@ -468,6 +468,17 @@ func Join(state *state.State, gateway *Gateway, networkCert *shared.CertInfo, se return fmt.Errorf("Failed to get storage pool driver: %w", err) } + // For all pools we add the config provided by the joining node. + config, ok := pools[name] + if !ok { + return fmt.Errorf("Joining member has no config for pool %s", name) + } + + err = tx.CreateStoragePoolConfig(id, node.ID, config) + if err != nil { + return fmt.Errorf("Failed to add joining node's pool config: %w", err) + } + if shared.StringInSlice(driver, []string{"ceph", "cephfs"}) { // For ceph pools we have to create volume // entries for the joining node. 
@@ -475,17 +486,6 @@ func Join(state *state.State, gateway *Gateway, networkCert *shared.CertInfo, se if err != nil { return fmt.Errorf("Failed to create ceph volumes for joining node: %w", err) } - } else { - // For other pools we add the config provided by the joining node. - config, ok := pools[name] - if !ok { - return fmt.Errorf("Joining member has no config for pool %s", name) - } - - err = tx.CreateStoragePoolConfig(id, node.ID, config) - if err != nil { - return fmt.Errorf("Failed to add joining node's pool config: %w", err) - } } } From 6e02447f44a00d7e9c3ccde74280d3091db73b79 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 13 Jul 2023 08:18:02 +0100 Subject: [PATCH 214/543] shared/ws/mirror: Don't send normal close message at end of MirrorWrite This prevents the web socket from being usable for anything else, which is not necessary. Signed-off-by: Thomas Parrott --- shared/ws/mirror.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/shared/ws/mirror.go b/shared/ws/mirror.go index e2449341c1cd..df6138a6c913 100644 --- a/shared/ws/mirror.go +++ b/shared/ws/mirror.go @@ -69,10 +69,6 @@ func MirrorWrite(ctx context.Context, conn *websocket.Conn, wc io.WriteCloser) c _, _ = io.Copy(wc, connRWC) defer close(chDone) - // Send close message. 
- closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") - _ = conn.WriteMessage(websocket.CloseMessage, closeMsg) - logger.Debug("Websocket: Stopped write mirror", logger.Ctx{"address": conn.RemoteAddr().String()}) }() From 8e13d83813d94046befda5aa74d41a1ee68849a8 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 13 Jul 2023 08:20:04 +0100 Subject: [PATCH 215/543] shared/ws/mirror: Setup defer to close channel first Signed-off-by: Thomas Parrott --- shared/ws/mirror.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/ws/mirror.go b/shared/ws/mirror.go index df6138a6c913..0e6ed9cad0d1 100644 --- a/shared/ws/mirror.go +++ b/shared/ws/mirror.go @@ -31,8 +31,8 @@ func MirrorRead(ctx context.Context, conn *websocket.Conn, rc io.ReadCloser) cha connRWC := NewWrapper(conn) go func() { - _, _ = io.Copy(connRWC, rc) defer close(chDone) + _, _ = io.Copy(connRWC, rc) // Send write barrier. connRWC.Close() @@ -66,8 +66,8 @@ func MirrorWrite(ctx context.Context, conn *websocket.Conn, wc io.WriteCloser) c connRWC := NewWrapper(conn) go func() { - _, _ = io.Copy(wc, connRWC) defer close(chDone) + _, _ = io.Copy(wc, connRWC) logger.Debug("Websocket: Stopped write mirror", logger.Ctx{"address": conn.RemoteAddr().String()}) }() From 49de0826419f099b22e2e58d63b99b3be7f420f5 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 23:13:49 +0100 Subject: [PATCH 216/543] lxd-migrate/transfer: Pass context into rsyncSend Signed-off-by: Thomas Parrott --- lxd-migrate/transfer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd-migrate/transfer.go b/lxd-migrate/transfer.go index 9ecc69f5d4e8..96c154373700 100644 --- a/lxd-migrate/transfer.go +++ b/lxd-migrate/transfer.go @@ -31,7 +31,7 @@ func rsyncSend(ctx context.Context, conn *websocket.Conn, path string, rsyncArgs defer func() { _ = dataSocket.Close() }() } - readDone, writeDone := ws.Mirror(context.Background(), conn, dataSocket) + readDone, 
writeDone := ws.Mirror(ctx, conn, dataSocket) output, err := io.ReadAll(stderr) if err != nil { From a911d254e09f8ab3c3c6ec74ba6d0009f15df0a5 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Mon, 3 Jul 2023 23:14:14 +0100 Subject: [PATCH 217/543] lxd-migrate/utils: Improve errors in transferRootfs Signed-off-by: Thomas Parrott --- lxd-migrate/utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd-migrate/utils.go b/lxd-migrate/utils.go index 8cd54778fd69..d9d1faee09ab 100644 --- a/lxd-migrate/utils.go +++ b/lxd-migrate/utils.go @@ -94,7 +94,7 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio err = rsyncSend(ctx, wsFs, rootfs, rsyncArgs, instanceType) if err != nil { - return abort(err) + return abort(fmt.Errorf("Failed sending filesystem volume: %w", err)) } // Send block volume @@ -116,7 +116,7 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio _, err = io.Copy(conn, f) if err != nil { - return err + return abort(fmt.Errorf("Failed sending block volume: %w", err)) } err = conn.Close() From 4e26299ba8b5a2c5deb7d06808b997a23929b80c Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 13 Jul 2023 08:04:06 +0100 Subject: [PATCH 218/543] lxd-migrate/utils: Use abort cleanup function earlier and more often Signed-off-by: Thomas Parrott --- lxd-migrate/utils.go | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/lxd-migrate/utils.go b/lxd-migrate/utils.go index d9d1faee09ab..491c69b84bcc 100644 --- a/lxd-migrate/utils.go +++ b/lxd-migrate/utils.go @@ -37,9 +37,14 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio return err } + abort := func(err error) error { + protoSendError(wsControl, err) + return err + } + wsFs, err := op.GetWebsocket(opAPI.Metadata[api.SecretNameFilesystem].(string)) if err != nil { - return err + return abort(err) } // Setup control struct @@ -66,7 +71,7 @@ func 
transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio if instanceType == api.InstanceTypeVM { stat, err := os.Stat(filepath.Join(rootfs, "root.img")) if err != nil { - return err + return abort(err) } size := stat.Size() @@ -76,22 +81,15 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio err = migration.ProtoSend(wsControl, &header) if err != nil { - protoSendError(wsControl, err) - return err + return abort(err) } err = migration.ProtoRecv(wsControl, &header) if err != nil { - protoSendError(wsControl, err) - return err + return abort(err) } // Send the filesystem - abort := func(err error) error { - protoSendError(wsControl, err) - return err - } - err = rsyncSend(ctx, wsFs, rootfs, rsyncArgs, instanceType) if err != nil { return abort(fmt.Errorf("Failed sending filesystem volume: %w", err)) @@ -101,7 +99,7 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio if instanceType == api.InstanceTypeVM { f, err := os.Open(filepath.Join(rootfs, "root.img")) if err != nil { - return err + return abort(err) } defer func() { _ = f.Close() }() @@ -121,7 +119,7 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio err = conn.Close() if err != nil { - return err + return abort(err) } } From 71981ce8fca114a0af599a27cb86de11bfc4dff7 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 13 Jul 2023 08:04:49 +0100 Subject: [PATCH 219/543] lxd-migrate/utils: Detect and fail if negotiated rsync features don't match Signed-off-by: Thomas Parrott --- lxd-migrate/utils.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lxd-migrate/utils.go b/lxd-migrate/utils.go index 491c69b84bcc..74c6270ddb39 100644 --- a/lxd-migrate/utils.go +++ b/lxd-migrate/utils.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "path/filepath" + "reflect" "strings" "github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery" @@ -59,7 +60,7 @@ func 
transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio rsyncHasFeature = true } - header := migration.MigrationHeader{ + offerHeader := migration.MigrationHeader{ RsyncFeatures: &migration.RsyncFeatures{ Xattrs: &rsyncHasFeature, Delete: &rsyncHasFeature, @@ -75,20 +76,28 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio } size := stat.Size() - header.VolumeSize = &size + offerHeader.VolumeSize = &size rootfs = shared.AddSlash(rootfs) } - err = migration.ProtoSend(wsControl, &header) + err = migration.ProtoSend(wsControl, &offerHeader) if err != nil { return abort(err) } - err = migration.ProtoRecv(wsControl, &header) + var respHeader migration.MigrationHeader + err = migration.ProtoRecv(wsControl, &respHeader) if err != nil { return abort(err) } + rsyncFeaturesOffered := offerHeader.GetRsyncFeaturesSlice() + rsyncFeaturesResponse := respHeader.GetRsyncFeaturesSlice() + + if !reflect.DeepEqual(rsyncFeaturesOffered, rsyncFeaturesResponse) { + return abort(fmt.Errorf("Offered rsync features (%v) differ from those in the migration response (%v)", rsyncFeaturesOffered, rsyncFeaturesResponse)) + } + // Send the filesystem err = rsyncSend(ctx, wsFs, rootfs, rsyncArgs, instanceType) if err != nil { From e44da03ddbfc968d0594b27da10d1d7634701e8a Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Thu, 13 Jul 2023 08:18:45 +0100 Subject: [PATCH 220/543] lxd-migrate/transfer: When remote websocket reaches EOF close connection to rsync Fixes hang. 
Signed-off-by: Thomas Parrott --- lxd-migrate/transfer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lxd-migrate/transfer.go b/lxd-migrate/transfer.go index 96c154373700..ea378ea73a80 100644 --- a/lxd-migrate/transfer.go +++ b/lxd-migrate/transfer.go @@ -32,6 +32,8 @@ func rsyncSend(ctx context.Context, conn *websocket.Conn, path string, rsyncArgs } readDone, writeDone := ws.Mirror(ctx, conn, dataSocket) + <-writeDone + _ = dataSocket.Close() output, err := io.ReadAll(stderr) if err != nil { @@ -42,7 +44,6 @@ func rsyncSend(ctx context.Context, conn *websocket.Conn, path string, rsyncArgs err = cmd.Wait() <-readDone - <-writeDone if err != nil { return fmt.Errorf("Failed to rsync: %v\n%s", err, output) From 59b82a75a94610a004ca5441f55473e2716cce80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Wed, 12 Jul 2023 16:28:57 +0200 Subject: [PATCH 221/543] lxd/instance/drivers/qemu: Occupy vsock Context ID through syscall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When trying to acquire a new vsock Context ID, we want to occupy it right away to prevent collisions. This helps for scenarios where two VMs have the same identical volatile.vsock_id (VM export/import) and in case LXD is used in a nested environment. If either the acquisition or the syscall fails, the next one gets selected. 
Signed-off-by: Julian Pelizäus --- lxd/instance/drivers/driver_qemu.go | 127 +++++++++++------- .../drivers/driver_qemu_config_test.go | 6 +- lxd/instance/drivers/driver_qemu_templates.go | 4 +- 3 files changed, 86 insertions(+), 51 deletions(-) diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index b0a07ecba6a6..ad2c02bec4d3 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" "time" + "unsafe" "github.com/flosch/pongo2" "github.com/gorilla/websocket" @@ -1136,14 +1137,27 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { revert.Add(func() { _ = d.unmount() }) - volatileSet := make(map[string]string) + // Define a set of files to open and pass their file descriptors to QEMU command. + fdFiles := make([]*os.File, 0) + + // Ensure passed files are closed after start has returned (either because QEMU has started or on error). + defer func() { + for _, file := range fdFiles { + _ = file.Close() + } + }() // New or existing vsock ID from volatile. - vsockID, err := d.nextVsockID() + vsockID, vsockF, err := d.nextVsockID() if err != nil { return err } + // Add allocated QEMU vhost file descriptor. + vsockFD := d.addFileDescriptor(&fdFiles, vsockF) + + volatileSet := make(map[string]string) + // Update vsock ID in volatile if needed for recovery (do this before UpdateBackupFile() call). oldVsockID := d.localConfig["volatile.vsock_id"] newVsockID := strconv.FormatUint(uint64(vsockID), 10) @@ -1346,16 +1360,6 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { return err } - // Define a set of files to open and pass their file descriptors to qemu command. - fdFiles := make([]*os.File, 0) - - // Ensure passed files are closed after start has returned (either because qemu has started or on error). 
- defer func() { - for _, file := range fdFiles { - _ = file.Close() - } - }() - // Snapshot if needed. snapName, expiry, err := d.getStartupSnapNameAndExpiry(d) if err != nil { @@ -1402,7 +1406,7 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error { } // Generate the QEMU configuration. - confFile, monHooks, err := d.generateQemuConfigFile(cpuInfo, mountInfo, qemuBus, devConfs, &fdFiles) + confFile, monHooks, err := d.generateQemuConfigFile(cpuInfo, mountInfo, qemuBus, vsockFD, devConfs, &fdFiles) if err != nil { op.Done(err) return err @@ -2731,7 +2735,7 @@ func (d *qemu) deviceBootPriorities() (map[string]int, error) { // generateQemuConfigFile writes the qemu config file and returns its location. // It writes the config file inside the VM's log path. -func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePools.MountInfo, busName string, devConfs []*deviceConfig.RunConfig, fdFiles *[]*os.File) (string, []monitorHook, error) { +func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePools.MountInfo, busName string, vsockFD int, devConfs []*deviceConfig.RunConfig, fdFiles *[]*os.File) (string, []monitorHook, error) { var monHooks []monitorHook cfg := qemuBase(&qemuBaseOpts{d.Architecture()}) @@ -2859,6 +2863,7 @@ func (d *qemu) generateQemuConfigFile(cpuInfo *cpuTopology, mountInfo *storagePo devAddr: devAddr, multifunction: multi, }, + vsockFD: vsockFD, vsockID: vsockID, } @@ -7276,74 +7281,100 @@ func (d *qemu) getVsockID() (uint32, error) { return uint32(vsockID), nil } -// freeVsockID returns true if the given vsockID is not yet acquired. -func (d *qemu) freeVsockID(vsockID uint32) bool { - c, err := lxdvsock.Dial(vsockID, shared.HTTPSDefaultPort) +// acquireVsockID tries to occupy the given vsock Context ID. +// If the ID is free it returns the corresponding file handle. 
+func (d *qemu) acquireVsockID(vsockID uint32) (*os.File, error) { + revert := revert.New() + defer revert.Fail() + + vsockF, err := os.OpenFile("/dev/vhost-vsock", os.O_RDWR, 0) if err != nil { - var unixErrno unix.Errno + return nil, fmt.Errorf("Failed to open vhost socket: %w", err) + } - if !errors.As(err, &unixErrno) { - return false - } + revert.Add(func() { _ = vsockF.Close() }) - if unixErrno != unix.ENODEV { - // Skip the vsockID if another syscall error was encountered. - return false + // The vsock Context ID cannot be supplied as type uint32. + vsockIDInt := int(vsockID) + + // 0x4008AF60 = VHOST_VSOCK_SET_GUEST_CID = _IOW(VHOST_VIRTIO, 0x60, __u64) + _, _, errno := unix.Syscall(unix.SYS_IOCTL, vsockF.Fd(), 0x4008AF60, uintptr(unsafe.Pointer(&vsockIDInt))) + if errno != 0 { + if !errors.Is(errno, unix.EADDRINUSE) { + return nil, fmt.Errorf("Failed ioctl syscall to vhost socket: %q", errno.Error()) } - // The syscall to the vsock device returned "no such device". - // This means the address (Context ID) is free. - return true + // vsock Context ID is already in use. + return nil, nil } - // Address is already in use. - c.Close() - return false + revert.Success() + return vsockF, nil +} + +// acquireExistingVsockID tries to acquire an already existing vsock Context ID from volatile. +// It returns both the acquired ID and opened vsock file handle for QEMU. +func (d *qemu) acquireExistingVsockID() (uint32, *os.File, error) { + vsockID, err := d.getVsockID() + if err != nil { + return 0, nil, err + } + + // Check if the vsockID from last VM start is still not acquired in case the VM was stopped. + f, err := d.acquireVsockID(vsockID) + if err != nil { + return 0, nil, err + } + + return vsockID, f, nil } -// nextVsockID returns the next free vsock Context ID for the VM. -// It tries to acquire one randomly until the timeout exceeds. -func (d *qemu) nextVsockID() (uint32, error) { +// nextVsockID tries to acquire the next free vsock Context ID for the VM. 
+// It returns both the acquired ID and opened vsock file handle for QEMU. +func (d *qemu) nextVsockID() (uint32, *os.File, error) { // Check if vsock ID from last VM start is present in volatile, then use that. // This allows a running VM to be recovered after DB record deletion and that an agent connection still works // after the VM's instance ID has changed. // Continue in case of error since the caller requires a valid vsockID in any case. - vsockID, err := d.getVsockID() - if err == nil { - // Check if the vsock ID from last VM start is still not acquired in case the VM was stopped. - if d.freeVsockID(vsockID) { - return vsockID, nil - } + vsockID, vsockF, _ := d.acquireExistingVsockID() + if vsockID != 0 && vsockF != nil { + return vsockID, vsockF, nil } + // Ignore the error from before and start to acquire a new Context ID. instanceUUID := uuid.Parse(d.localConfig["volatile.uuid"]) if instanceUUID == nil { - return 0, fmt.Errorf("Failed to parse instance UUID from volatile.uuid") + return 0, nil, fmt.Errorf("Failed to parse instance UUID from volatile.uuid") } r, err := util.GetStableRandomGenerator(instanceUUID.String()) if err != nil { - return 0, fmt.Errorf("Failed generating stable random seed from instance UUID %q: %w", instanceUUID, err) + return 0, nil, fmt.Errorf("Failed generating stable random seed from instance UUID %q: %w", instanceUUID, err) } - timeout := 5 * time.Second + timeout := time.Now().Add(5 * time.Second) // Try to find a new Context ID. 
- for start := time.Now(); time.Since(start) <= timeout; { + for { + if time.Now().After(timeout) { + return 0, nil, fmt.Errorf("Timeout exceeded whilst trying to acquire the next vsock Context ID") + } + candidateVsockID := r.Uint32() if d.reservedVsockID(candidateVsockID) { continue } - if d.freeVsockID(candidateVsockID) { - return candidateVsockID, nil + vsockF, err := d.acquireVsockID(candidateVsockID) + if err != nil { + return 0, nil, err } - continue + if vsockF != nil { + return candidateVsockID, vsockF, nil + } } - - return 0, fmt.Errorf("Timeout exceeded whilst trying to acquire the next vsock Context ID") } // InitPID returns the instance's current process ID. diff --git a/lxd/instance/drivers/driver_qemu_config_test.go b/lxd/instance/drivers/driver_qemu_config_test.go index a50369722a3a..dcacfb6934ea 100644 --- a/lxd/instance/drivers/driver_qemu_config_test.go +++ b/lxd/instance/drivers/driver_qemu_config_test.go @@ -281,7 +281,7 @@ func TestQemuConfigTemplates(t *testing.T) { opts qemuVsockOpts expected string }{{ - qemuVsockOpts{qemuDevOpts{"pcie", "qemu_pcie0", "00.4", true}, 14}, + qemuVsockOpts{qemuDevOpts{"pcie", "qemu_pcie0", "00.4", true}, 4, 14}, `# Vsock [device "qemu_vsock"] driver = "vhost-vsock-pci" @@ -289,13 +289,15 @@ func TestQemuConfigTemplates(t *testing.T) { addr = "00.4" multifunction = "on" guest-cid = "14" + vhostfd = "4" `, }, { - qemuVsockOpts{qemuDevOpts{"ccw", "qemu_pcie0", "00.4", false}, 3}, + qemuVsockOpts{qemuDevOpts{"ccw", "qemu_pcie0", "00.4", false}, 4, 3}, `# Vsock [device "qemu_vsock"] driver = "vhost-vsock-ccw" guest-cid = "3" + vhostfd = "4" `, }} for _, tc := range testCases { diff --git a/lxd/instance/drivers/driver_qemu_templates.go b/lxd/instance/drivers/driver_qemu_templates.go index 1b64bf414d80..de8ce8deb5a7 100644 --- a/lxd/instance/drivers/driver_qemu_templates.go +++ b/lxd/instance/drivers/driver_qemu_templates.go @@ -299,6 +299,7 @@ func qemuRNG(opts *qemuDevOpts) []cfgSection { type qemuVsockOpts struct 
{
 	dev     qemuDevOpts
+	vsockFD int
 	vsockID uint32
 }
 
@@ -313,7 +314,8 @@ func qemuVsock(opts *qemuVsockOpts) []cfgSection {
 		name:    `device "qemu_vsock"`,
 		comment: "Vsock",
 		entries: append(qemuDeviceEntries(&entriesOpts),
-			cfgEntry{key: "guest-cid", value: fmt.Sprintf("%d", opts.vsockID)}),
+			cfgEntry{key: "guest-cid", value: fmt.Sprintf("%d", opts.vsockID)},
+			cfgEntry{key: "vhostfd", value: fmt.Sprintf("%d", opts.vsockFD)}),
 	}}
 }
 

From c4553dc46ff991fc037a5acae48ec4521243f6ea Mon Sep 17 00:00:00 2001
From: Yao Noel Achi
Date: Tue, 11 Jul 2023 06:33:48 +0000
Subject: [PATCH 222/543] lxc/profile: Add refresh option for copy

lxc profile copy command would refuse to update a profile if it already
exists by sending an error message to the user. There are cases where we
want to update the target profile anyway.

This change adds the --refresh flag, which allows updating an existing
target profile with the details of the source one.

Signed-off-by: Yao Noel Achi
---
 lxc/profile.go | 19 +++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/lxc/profile.go b/lxc/profile.go
index 3e1ec498a416..b3d899bac728 100644
--- a/lxc/profile.go
+++ b/lxc/profile.go
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"io"
+	"net/http"
 	"os"
 	"sort"
 	"strings"
@@ -238,6 +239,7 @@ type cmdProfileCopy struct {
 	profile *cmdProfile
 
 	flagTargetProject string
+	flagRefresh       bool
 }
 
 func (c *cmdProfileCopy) Command() *cobra.Command {
@@ -248,6 +250,7 @@ func (c *cmdProfileCopy) Command() *cobra.Command {
 	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
 		`Copy profiles`))
 	cmd.Flags().StringVar(&c.flagTargetProject, "target-project", "", i18n.G("Copy to a project different from the source")+"``")
+	cmd.Flags().BoolVar(&c.flagRefresh, "refresh", false, i18n.G("Update the target profile from the source if it already exists"))
 
 	cmd.RunE = c.Run
 
@@ -284,15 +287,23 @@ func (c *cmdProfileCopy) Run(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
+	if c.flagTargetProject != "" {
+		
dest.server = dest.server.UseProject(c.flagTargetProject) + } + + // Refresh the profile if requested. + if c.flagRefresh { + err := dest.server.UpdateProfile(dest.name, profile.Writable(), "") + if err == nil || !api.StatusErrorCheck(err, http.StatusNotFound) { + return err + } + } + newProfile := api.ProfilesPost{ ProfilePut: profile.Writable(), Name: dest.name, } - if c.flagTargetProject != "" { - dest.server = dest.server.UseProject(c.flagTargetProject) - } - return dest.server.CreateProfile(newProfile) } From d2d318d1325ad534e3a6d1e851bb9dae849841ea Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 14 Jul 2023 07:58:43 +0200 Subject: [PATCH 223/543] doc: Add busybox-static requirement for running test suite Signed-off-by: Thomas Hipp --- doc/installing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/installing.md b/doc/installing.md index e1d722823471..83ecb3e74c4a 100644 --- a/doc/installing.md +++ b/doc/installing.md @@ -36,7 +36,7 @@ sudo apt install btrfs-progs To run the test suite, you'll also need: ```bash -sudo apt install curl gettext jq sqlite3 socat bind9-dnsutils +sudo apt install busybox-static curl gettext jq sqlite3 socat bind9-dnsutils ``` ### From source: Build the latest version From e4247004ec811dc6bff51f129b53b90a140b64b0 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Fri, 14 Jul 2023 09:39:44 +0200 Subject: [PATCH 224/543] test: Fix flaky clustering image refresh Fixes #11979 Signed-off-by: Thomas Hipp --- test/suites/clustering.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index 32b8e839f078..e532e9127848 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -2688,7 +2688,8 @@ test_clustering_image_refresh() { # Wait for the image to be refreshed for pid in ${pids}; do - wait "${pid}" + # Don't fail if PID isn't available as the process could be done already. 
+ wait "${pid}" || true done if [ "${poolDriver}" != "dir" ]; then @@ -2725,7 +2726,8 @@ test_clustering_image_refresh() { # Wait for the image to be refreshed for pid in ${pids}; do - wait "${pid}" + # Don't fail if PID isn't available as the process could be done already. + wait "${pid}" || true done LXD_DIR="${LXD_ONE_DIR}" lxd sql global 'select images.fingerprint from images join projects on images.project_id=projects.id where projects.name="foo"' | grep "${old_fingerprint}" @@ -2750,7 +2752,8 @@ test_clustering_image_refresh() { # Wait for the image to be refreshed for pid in ${pids}; do - wait "${pid}" + # Don't fail if PID isn't available as the process could be done already. + wait "${pid}" || true done pids="" From cbe3afbcdd08d9c347fa0a2ef3a24211eadfd7e3 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:45:13 +0200 Subject: [PATCH 225/543] ceph: Detect custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_ceph_utils.go | 12 +++++++++--- lxd/storage/drivers/driver_ceph_volumes.go | 5 ++++- lxd/storage/drivers/utils_ceph.go | 2 ++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/lxd/storage/drivers/driver_ceph_utils.go b/lxd/storage/drivers/driver_ceph_utils.go index 6fc836e7c52b..b8b48ba0ce64 100644 --- a/lxd/storage/drivers/driver_ceph_utils.go +++ b/lxd/storage/drivers/driver_ceph_utils.go @@ -26,6 +26,9 @@ import ( // cephBlockVolSuffix suffix used for block content type volumes. const cephBlockVolSuffix = ".block" +// cephISOVolSuffix suffix used for iso content type volumes. +const cephISOVolSuffix = ".iso" + const cephVolumeTypeZombieImage = VolumeType("zombie_image") // CephDefaultCluster represents the default ceph cluster name. @@ -899,7 +902,7 @@ func (d *ceph) parseParent(parent string) (Volume, string, error) { // Match normal instance volumes. 
// Looks for volumes like: // pool/container_bar@zombie_snapshot_ce77e971-6c1b-45c0-b193-dba9ec5e7d82 - reInst, err := regexp.Compile(`^((?:zombie_)?[a-z-]+)_([\w-]+)\.?(block)?@?([-\w]+)?$`) + reInst, err := regexp.Compile(`^((?:zombie_)?[a-z-]+)_([\w-]+)\.?(block|iso)?@?([-\w]+)?$`) if err != nil { return vol, "", err } @@ -910,9 +913,12 @@ func (d *ceph) parseParent(parent string) (Volume, string, error) { vol.pool = poolName vol.name = instRes[2] - if instRes[3] == "block" { + switch instRes[3] { + case "block": vol.contentType = ContentTypeBlock - } else { + case "iso": + vol.contentType = ContentTypeISO + default: vol.contentType = ContentTypeFS } diff --git a/lxd/storage/drivers/driver_ceph_volumes.go b/lxd/storage/drivers/driver_ceph_volumes.go index a49fabe68613..898765c6d329 100644 --- a/lxd/storage/drivers/driver_ceph_volumes.go +++ b/lxd/storage/drivers/driver_ceph_volumes.go @@ -1118,7 +1118,10 @@ func (d *ceph) ListVolumes() ([]Volume, error) { } contentType := ContentTypeFS - if volType == VolumeTypeVM || isBlock { + if volType == VolumeTypeCustom && strings.HasSuffix(volName, cephISOVolSuffix) { + contentType = ContentTypeISO + volName = strings.TrimSuffix(volName, cephISOVolSuffix) + } else if volType == VolumeTypeVM || isBlock { contentType = ContentTypeBlock volName = strings.TrimSuffix(volName, cephBlockVolSuffix) } diff --git a/lxd/storage/drivers/utils_ceph.go b/lxd/storage/drivers/utils_ceph.go index 99a9419f062a..ea0b1c65a95a 100644 --- a/lxd/storage/drivers/utils_ceph.go +++ b/lxd/storage/drivers/utils_ceph.go @@ -24,6 +24,8 @@ func CephGetRBDImageName(vol Volume, snapName string, zombie bool) string { if vol.contentType == ContentTypeBlock { parentName = fmt.Sprintf("%s%s", parentName, cephBlockVolSuffix) + } else if vol.contentType == ContentTypeISO { + parentName = fmt.Sprintf("%s%s", parentName, cephISOVolSuffix) } // Use volume's type as storage volume prefix, unless there is an override in cephVolTypePrefixes. 
From f10eb84d8168e4887b4e6a3d72682174b8a44691 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 19:54:09 +0200 Subject: [PATCH 226/543] btrfs: Use suffixed volume name when deleting custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_btrfs_utils.go | 3 +++ lxd/storage/drivers/driver_btrfs_volumes.go | 10 ++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lxd/storage/drivers/driver_btrfs_utils.go b/lxd/storage/drivers/driver_btrfs_utils.go index 98a96c8a66b2..ebdde5f6b65e 100644 --- a/lxd/storage/drivers/driver_btrfs_utils.go +++ b/lxd/storage/drivers/driver_btrfs_utils.go @@ -30,6 +30,9 @@ import ( var errBtrfsNoQuota = fmt.Errorf("Quotas disabled on filesystem") var errBtrfsNoQGroup = fmt.Errorf("Unable to find quota group") +// btrfsISOVolSuffix suffix used for iso content type volumes. +const btrfsISOVolSuffix = ".iso" + // setReceivedUUID sets the "Received UUID" field on a subvolume with the given path using ioctl. func setReceivedUUID(path string, UUID string) error { type btrfsIoctlReceivedSubvolArgs struct { diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go index efab267660af..c9486e826c65 100644 --- a/lxd/storage/drivers/driver_btrfs_volumes.go +++ b/lxd/storage/drivers/driver_btrfs_volumes.go @@ -909,8 +909,14 @@ func (d *btrfs) DeleteVolume(vol Volume, op *operations.Operation) error { return fmt.Errorf("Cannot remove a volume that has snapshots") } + volName := vol.name + + if vol.volType == VolumeTypeCustom && vol.contentType == ContentTypeISO { + volName = fmt.Sprintf("%s%s", vol.name, btrfsISOVolSuffix) + } + // If the volume doesn't exist, then nothing more to do. 
- volPath := GetVolumeMountPath(d.name, vol.volType, vol.name) + volPath := GetVolumeMountPath(d.name, vol.volType, volName) if !shared.PathExists(volPath) { return nil } @@ -923,7 +929,7 @@ func (d *btrfs) DeleteVolume(vol Volume, op *operations.Operation) error { // Although the volume snapshot directory should already be removed, lets remove it here // to just in case the top-level directory is left. - err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, vol.name) + err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, volName) if err != nil { return err } From 893c72ab8b3f624f7d32e1919918e5dc6a7d38d1 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:46:10 +0200 Subject: [PATCH 227/543] lvm: Detect custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_lvm_utils.go | 5 +++++ lxd/storage/drivers/driver_lvm_volumes.go | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_lvm_utils.go b/lxd/storage/drivers/driver_lvm_utils.go index 4c2f1421e7c7..4129bd84c29c 100644 --- a/lxd/storage/drivers/driver_lvm_utils.go +++ b/lxd/storage/drivers/driver_lvm_utils.go @@ -25,6 +25,9 @@ import ( // lvmBlockVolSuffix suffix used for block content type volumes. const lvmBlockVolSuffix = ".block" +// lvmISOVolSuffix suffix used for iso content type volumes. +const lvmISOVolSuffix = ".iso" + // lvmSnapshotSeparator separator character used between volume name and snaphot name in logical volume names. const lvmSnapshotSeparator = "-" @@ -472,6 +475,8 @@ func (d *lvm) lvmFullVolumeName(volType VolumeType, contentType ContentType, vol contentTypeSuffix := "" if contentType == ContentTypeBlock { contentTypeSuffix = lvmBlockVolSuffix + } else if contentType == ContentTypeISO { + contentTypeSuffix = lvmISOVolSuffix } // Escape the volume name to a name suitable for using as a logical volume. 
diff --git a/lxd/storage/drivers/driver_lvm_volumes.go b/lxd/storage/drivers/driver_lvm_volumes.go index 5c15cf0a53f4..30a2dca9a344 100644 --- a/lxd/storage/drivers/driver_lvm_volumes.go +++ b/lxd/storage/drivers/driver_lvm_volumes.go @@ -570,7 +570,10 @@ func (d *lvm) ListVolumes() ([]Volume, error) { volName = strings.Replace(volName, lvmEscapedHyphen, "-", -1) contentType := ContentTypeFS - if volType == VolumeTypeVM || isBlock { + if volType == VolumeTypeCustom && strings.HasSuffix(volName, lvmISOVolSuffix) { + contentType = ContentTypeISO + volName = strings.TrimSuffix(volName, lvmISOVolSuffix) + } else if volType == VolumeTypeVM || isBlock { contentType = ContentTypeBlock volName = strings.TrimSuffix(volName, lvmBlockVolSuffix) } From 61f42e8a346134dd1c00dc94ddc120729fc7ef79 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:46:35 +0200 Subject: [PATCH 228/543] zfs: Detect custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_utils.go | 5 +++++ lxd/storage/drivers/driver_zfs_volumes.go | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/driver_zfs_utils.go b/lxd/storage/drivers/driver_zfs_utils.go index bf8d6edd6916..026bc587d0ce 100644 --- a/lxd/storage/drivers/driver_zfs_utils.go +++ b/lxd/storage/drivers/driver_zfs_utils.go @@ -21,6 +21,9 @@ const ( // zfsBlockVolSuffix suffix used for block content type volumes. zfsBlockVolSuffix = ".block" + // zfsISOVolSuffix suffix used for iso content type volumes. + zfsISOVolSuffix = ".iso" + // zfsMinBlockSize is a minimum value for recordsize and volblocksize properties. 
zfsMinBlocksize = 512 @@ -40,6 +43,8 @@ func (d *zfs) dataset(vol Volume, deleted bool) string { if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock { name = fmt.Sprintf("%s%s", name, zfsBlockVolSuffix) + } else if vol.volType == VolumeTypeCustom && vol.contentType == ContentTypeISO { + name = fmt.Sprintf("%s%s", name, zfsISOVolSuffix) } if snapName != "" { diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 4b221701ac4c..781e83d61aca 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -1854,7 +1854,10 @@ func (d *zfs) ListVolumes() ([]Volume, error) { contentType = ContentTypeBlock } - if volType == VolumeTypeVM || isBlock { + if volType == VolumeTypeCustom && isBlock && strings.HasSuffix(volName, zfsISOVolSuffix) { + contentType = ContentTypeISO + volName = strings.TrimSuffix(volName, zfsISOVolSuffix) + } else if volType == VolumeTypeVM || isBlock { volName = strings.TrimSuffix(volName, zfsBlockVolSuffix) } From 295d7dd715d01f343cedb123157ca767692feb08 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:47:19 +0200 Subject: [PATCH 229/543] generic: Detect custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/generic_vfs.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lxd/storage/drivers/generic_vfs.go b/lxd/storage/drivers/generic_vfs.go index d4e55c4afeae..edd4f149a434 100644 --- a/lxd/storage/drivers/generic_vfs.go +++ b/lxd/storage/drivers/generic_vfs.go @@ -30,6 +30,9 @@ const genericVolumeBlockExtension = "img" // genericVolumeDiskFile used to indicate the file name used for block volume disk files. const genericVolumeDiskFile = "root.img" +// genericISOVolumeSuffix suffix used for generic iso content type volumes. 
+const genericISOVolumeSuffix = ".iso" + // genericVFSGetResources is a generic GetResources implementation for VFS-only drivers. func genericVFSGetResources(d Driver) (*api.ResourcesStoragePool, error) { // Get the VFS information @@ -1097,14 +1100,21 @@ func genericVFSListVolumes(d Driver) ([]Volume, error) { } for _, ent := range ents { + volName := ent.Name() + contentType := ContentTypeFS if volType == VolumeTypeVM { contentType = ContentTypeBlock - } else if volType == VolumeTypeCustom && shared.PathExists(filepath.Join(volTypePath, ent.Name(), genericVolumeDiskFile)) { - contentType = ContentTypeBlock + } else if volType == VolumeTypeCustom && shared.PathExists(filepath.Join(volTypePath, volName, genericVolumeDiskFile)) { + if strings.HasSuffix(ent.Name(), genericISOVolumeSuffix) { + contentType = ContentTypeISO + volName = strings.TrimSuffix(volName, genericISOVolumeSuffix) + } else { + contentType = ContentTypeBlock + } } - vols = append(vols, NewVolume(d, poolName, volType, contentType, ent.Name(), make(map[string]string), poolConfig)) + vols = append(vols, NewVolume(d, poolName, volType, contentType, volName, make(map[string]string), poolConfig)) } } From fa8651ffebf6790402cda8fdef0c2d864ba500cf Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:49:24 +0200 Subject: [PATCH 230/543] volume: Get mount path for custom ISO volumes Signed-off-by: Thomas Hipp --- lxd/storage/drivers/volume.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lxd/storage/drivers/volume.go b/lxd/storage/drivers/volume.go index 6115f12ec991..ea03f129a54d 100644 --- a/lxd/storage/drivers/volume.go +++ b/lxd/storage/drivers/volume.go @@ -19,6 +19,9 @@ import ( // tmpVolSuffix Suffix to use for any temporary volumes created by LXD. const tmpVolSuffix = ".lxdtmp" +// isoVolSuffix suffix used for iso content type volumes. +const isoVolSuffix = ".iso" + // DefaultBlockSize is the default size of block volumes. 
const DefaultBlockSize = "10GiB" @@ -161,7 +164,13 @@ func (v Volume) MountPath() string { return v.mountCustomPath } - return GetVolumeMountPath(v.pool, v.volType, v.name) + volName := v.name + + if v.volType == VolumeTypeCustom && v.contentType == ContentTypeISO { + volName = fmt.Sprintf("%s%s", volName, isoVolSuffix) + } + + return GetVolumeMountPath(v.pool, v.volType, volName) } // mountLockName returns the lock name to use for mount/unmount operations on a volume. From 1f36e614aebca1cab8ae68589ba7e6d68ce251ad Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Wed, 12 Jul 2023 16:51:23 +0200 Subject: [PATCH 231/543] patches: Rename existing custom ISO volumes This suffixes existing custom ISO volumes with ".iso" in order to distinguish them from regular custom block volumes. Signed-off-by: Thomas Hipp --- lxd/patches.go | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/lxd/patches.go b/lxd/patches.go index 29d28600009d..6b7ea8981f20 100644 --- a/lxd/patches.go +++ b/lxd/patches.go @@ -20,6 +20,7 @@ import ( "github.com/canonical/lxd/lxd/project" "github.com/canonical/lxd/lxd/revert" storagePools "github.com/canonical/lxd/lxd/storage" + storageDrivers "github.com/canonical/lxd/lxd/storage/drivers" "github.com/canonical/lxd/lxd/util" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" @@ -72,6 +73,7 @@ var patches = []patch{ {name: "storage_missing_snapshot_records", stage: patchPostDaemonStorage, run: patchGenericStorage}, {name: "storage_delete_old_snapshot_records", stage: patchPostDaemonStorage, run: patchGenericStorage}, {name: "storage_zfs_drop_block_volume_filesystem_extension", stage: patchPostDaemonStorage, run: patchGenericStorage}, + {name: "storage_move_custom_iso_block_volumes", stage: patchPostDaemonStorage, run: patchStorageRenameCustomISOBlockVolumes}, } type patch struct { @@ -770,4 +772,71 @@ func patchNetworkClearBridgeVolatileHwaddr(name string, d *Daemon) error { return nil 
} +// patchStorageRenameCustomISOBlockVolumes renames existing custom ISO volumes by adding the ".iso" suffix so they can be distinguished from regular custom block volumes. +// This patch doesn't use the patchGenericStorage function because the storage drivers themselves aren't aware of custom ISO volumes. +func patchStorageRenameCustomISOBlockVolumes(name string, d *Daemon) error { + s := d.State() + + // Get all storage pool names. + pools, err := s.DB.Cluster.GetStoragePoolNames() + if err != nil { + return fmt.Errorf("Failed getting storage pool names: %w", err) + } + + volTypeCustom := db.StoragePoolVolumeTypeCustom + customPoolVolumes := make(map[string][]*db.StorageVolume, 0) + + err = s.DB.Cluster.Transaction(s.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { + for _, pool := range pools { + // Get storage pool ID. + poolID, err := tx.GetStoragePoolID(ctx, pool) + if err != nil { + return fmt.Errorf("Failed getting storage pool ID of pool %q: %w", pool, err) + } + + // Get the pool's custom storage volumes. + customVolumes, err := tx.GetStoragePoolVolumes(ctx, poolID, false, db.StorageVolumeFilter{Type: &volTypeCustom}) + if err != nil { + return fmt.Errorf("Failed getting custom storage volumes of pool %q: %w", pool, err) + } + + if customPoolVolumes[pool] == nil { + customPoolVolumes[pool] = []*db.StorageVolume{} + } + + customPoolVolumes[pool] = append(customPoolVolumes[pool], customVolumes...) + } + + return nil + }) + if err != nil { + return err + } + + for poolName, volumes := range customPoolVolumes { + // Load storage pool. + p, err := storagePools.LoadByName(s, poolName) + if err != nil { + return fmt.Errorf("Failed loading pool %q: %w", poolName, err) + } + + for _, vol := range volumes { + // Exclude non-ISO custom volumes. + if vol.ContentType != db.StoragePoolVolumeContentTypeNameISO { + continue + } + + // We need to use ContentTypeBlock here in order for the driver to figure out the correct (old) location. 
+ oldVol := storageDrivers.NewVolume(p.Driver(), p.Name(), storageDrivers.VolumeTypeCustom, storageDrivers.ContentTypeBlock, project.StorageVolume(vol.Project, vol.Name), nil, nil) + + err = p.Driver().RenameVolume(oldVol, fmt.Sprintf("%s.iso", oldVol.Name()), nil) + if err != nil { + return fmt.Errorf("Failed renaming volume: %w", err) + } + } + } + + return nil +} + // Patches end here From 34b3cee4e2d769469f1815714b9ef66c5f25299a Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 13 Jul 2023 15:35:47 +0200 Subject: [PATCH 232/543] storage: Detect unknown ISO custom volumes Signed-off-by: Thomas Hipp --- lxd/storage/backend_lxd.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 51e324089f2e..4ca55345b4d8 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -5271,6 +5271,8 @@ func (b *lxdBackend) detectUnknownCustomVolume(vol *drivers.Volume, projectVols if contentType == drivers.ContentTypeBlock { apiContentType = db.StoragePoolVolumeContentTypeNameBlock + } else if contentType == drivers.ContentTypeISO { + apiContentType = db.StoragePoolVolumeContentTypeNameISO } else if contentType == drivers.ContentTypeFS { apiContentType = db.StoragePoolVolumeContentTypeNameFS From 4ee1c5716ebcc64ee617028c586c3b7cb82d45a1 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 10:45:02 +0200 Subject: [PATCH 233/543] lxd/device: Add `checkAttachedRunningProcesses` function Signed-off-by: Gabriel Mougard --- lxd/device/device_utils_generic.go | 41 ++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/lxd/device/device_utils_generic.go b/lxd/device/device_utils_generic.go index f228407fa23d..4d0864ff1d90 100644 --- a/lxd/device/device_utils_generic.go +++ b/lxd/device/device_utils_generic.go @@ -1,7 +1,12 @@ package device import ( + "bufio" "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" "strings" "github.com/canonical/lxd/shared" @@ -21,3 
+26,39 @@ func validatePCIDevice(address string) error { return nil } + +// checkAttachedRunningProcess checks if a device is tied to running processes. +func checkAttachedRunningProcesses(devicePath string) ([]string, error) { + var processes []string + procDir := "/proc" + files, err := ioutil.ReadDir(procDir) + if err != nil { + return nil, fmt.Errorf("failed to read /proc directory: %w", err) + } + + for _, file := range files { + // Check if the directory name is a number (i.e., a PID). + _, err := strconv.Atoi(file.Name()) + if err != nil { + continue + } + + mapsFile := filepath.Join(procDir, file.Name(), "maps") + f, err := os.Open(mapsFile) + if err != nil { + continue // If we can't read a process's maps file, skip it. + } + + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if strings.Contains(scanner.Text(), devicePath) { + processes = append(processes, file.Name()) + break + } + } + } + + return processes, nil +} From 11fd474fbcaa87a99e902d5160560f546f765cf6 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 10:45:44 +0200 Subject: [PATCH 234/543] lxd/device/gpu: for VM, if `gputype=physical`, check that that no procs are tied to card before unbind Signed-off-by: Gabriel Mougard --- lxd/device/gpu_physical.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lxd/device/gpu_physical.go b/lxd/device/gpu_physical.go index ba7e3019dc1b..58302aeeb496 100644 --- a/lxd/device/gpu_physical.go +++ b/lxd/device/gpu_physical.go @@ -225,6 +225,24 @@ func (d *gpuPhysical) startVM() (*deviceConfig.RunConfig, error) { continue } + // Check for existing running processes tied to the GPU. + // Failing early here in case of attached running processes to the card + // avoids a blocking call to os.WriteFile() when unbinding the device. 
+ if gpu.Nvidia != nil && gpu.Nvidia.CardName != "" && shared.PathExists(filepath.Join("/dev", gpu.Nvidia.CardName)) { + devPath := filepath.Join("/dev", gpu.Nvidia.CardName) + runningProcs, err := checkAttachedRunningProcesses(devPath) + if err != nil { + return nil, err + } + + if len(runningProcs) > 0 { + return nil, fmt.Errorf( + "Cannot use device %q, %d processes are still attached to it:\n\t%s", + devPath, len(runningProcs), strings.Join(runningProcs, "\n\t"), + ) + } + } + if pciAddress != "" { return nil, fmt.Errorf("VMs cannot match multiple GPUs per device") } From 1b8b73b06fcf6a823388ef2021259163ec10b0f5 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Wed, 5 Jul 2023 17:07:13 +0200 Subject: [PATCH 235/543] client: Add `getInstanceExecOutputLogFile` and `deleteInstanceExecOutputLogFile` Signed-off-by: Gabriel Mougard --- client/lxd_instances.go | 65 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index c6515de1223f..40addd9d9e8f 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -2094,6 +2094,71 @@ func (r *ProtocolLXD) DeleteInstanceLogfile(name string, filename string) error return nil } +// getInstanceExecOutputLogFile returns the content of the requested exec logfile. +// +// Note that it's the caller's responsibility to close the returned ReadCloser. 
+func (r *ProtocolLXD) getInstanceExecOutputLogFile(name string, filename string) (io.ReadCloser, error) { + err := r.CheckExtension("container_exec_recording") + if err != nil { + return nil, err + } + + path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) + if err != nil { + return nil, err + } + + // Prepare the HTTP request + url := fmt.Sprintf("%s/1.0%s/%s/logs/exec-output/%s", r.httpBaseURL.String(), path, url.PathEscape(name), url.PathEscape(filename)) + + url, err = r.setQueryAttributes(url) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + // Send the request + resp, err := r.DoHTTP(req) + if err != nil { + return nil, err + } + + // Check the return value for a cleaner error + if resp.StatusCode != http.StatusOK { + _, _, err := lxdParseResponse(resp) + if err != nil { + return nil, err + } + } + + return resp.Body, nil +} + +// deleteInstanceExecOutputLogFiles deletes the requested exec logfile. +func (r *ProtocolLXD) deleteInstanceExecOutputLogFile(instanceName string, filename string) error { + err := r.CheckExtension("container_exec_recording") + if err != nil { + return err + } + + path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) + if err != nil { + return err + } + + // Send the request + _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/logs/exec-output/%s", path, url.PathEscape(instanceName), url.PathEscape(filename)), nil, "") + if err != nil { + return err + } + + return nil +} + // GetInstanceMetadata returns instance metadata. 
func (r *ProtocolLXD) GetInstanceMetadata(name string) (*api.ImageMetadata, string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) From edaf88f777745f664134eaad87f3e19ef7e9f9eb Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Wed, 12 Jul 2023 15:14:29 +0200 Subject: [PATCH 236/543] client: stream the content of the exec log file to stdout/stderr within the `ExecInstance` func Signed-off-by: Gabriel Mougard --- client/lxd_instances.go | 58 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/client/lxd_instances.go b/client/lxd_instances.go index 40addd9d9e8f..23eaa8e4890f 100644 --- a/client/lxd_instances.go +++ b/client/lxd_instances.go @@ -9,6 +9,7 @@ import ( "net" "net/http" "net/url" + "path/filepath" "strings" "github.com/gorilla/websocket" @@ -1134,6 +1135,63 @@ func (r *ProtocolLXD) ExecInstance(instanceName string, exec api.InstanceExecPos } } + if exec.RecordOutput && (args.Stdout != nil || args.Stderr != nil) { + err = op.Wait() + if err != nil { + return nil, err + } + + opAPI = op.Get() + outputFiles := map[string]string{} + outputs, ok := opAPI.Metadata["output"].(map[string]any) + if ok { + for k, v := range outputs { + outputFiles[k] = v.(string) + } + } + + if outputFiles["1"] != "" { + reader, _ := r.getInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"])) + if args.Stdout != nil { + _, errCopy := io.Copy(args.Stdout, reader) + // Regardless of errCopy value, we want to delete the file after a copy operation + errDelete := r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"])) + if errDelete != nil { + return nil, errDelete + } + + if errCopy != nil { + return nil, fmt.Errorf("Could not copy the content of the exec output log file to stdout: %w", err) + } + } + + err = r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"])) + if err != nil { + return nil, err + } + } + + if outputFiles["2"] != "" { + reader, _ := 
r.getInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["2"])) + if args.Stderr != nil { + _, errCopy := io.Copy(args.Stderr, reader) + errDelete := r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"])) + if errDelete != nil { + return nil, errDelete + } + + if errCopy != nil { + return nil, fmt.Errorf("Could not copy the content of the exec output log file to stderr: %w", err) + } + } + + err = r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["2"])) + if err != nil { + return nil, err + } + } + } + // Call the control handler with a connection to the control socket if args.Control != nil && fds[api.SecretNameControl] != "" { conn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) From 5933773510b115b4664a6e5760481ac563e97e82 Mon Sep 17 00:00:00 2001 From: Yao Noel Achi Date: Wed, 12 Jul 2023 11:26:13 +0000 Subject: [PATCH 237/543] tests: add tests for lxc profile copy with refresh flag Add tests for both scenarios (target profile exist or not) Signed-off-by: Yao Noel Achi --- test/suites/projects.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/suites/projects.sh b/test/suites/projects.sh index a656cfddae77..40caa9ed4bba 100644 --- a/test/suites/projects.sh +++ b/test/suites/projects.sh @@ -320,8 +320,15 @@ test_projects_profiles() { # Try project copy lxc project create foo + lxc profile set --project default default user.x z lxc profile copy --project default --target-project foo default bar + # copy to an existing profile without --refresh should fail + ! 
lxc profile copy --project default --target-project foo default bar + lxc profile copy --project default --target-project foo default bar --refresh + lxc profile get --project foo bar user.x | grep -q 'z' + lxc profile copy --project default --target-project foo default bar-non-existent --refresh lxc profile delete bar --project foo + lxc profile delete bar-non-existent --project foo lxc project delete foo } From fb301784c1c27647e9046503b203fcae15714eeb Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 10:41:54 +0100 Subject: [PATCH 238/543] Makefile: Install latest golangci-lint if not already installed. Signed-off-by: Mark Laing --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1ba4ad9c7713..676673d2c4ea 100644 --- a/Makefile +++ b/Makefile @@ -246,7 +246,7 @@ build-mo: $(MOFILES) .PHONY: static-analysis static-analysis: ifeq ($(shell command -v golangci-lint 2> /dev/null),) - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin endif ifeq ($(shell command -v shellcheck 2> /dev/null),) echo "Please install shellcheck" From 098236f8acac2ed890d22b66dbf2249a6b3bceaa Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:33:20 +0100 Subject: [PATCH 239/543] lxd: Fix gosimple false positive. 
Signed-off-by: Mark Laing --- lxd/images.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/images.go b/lxd/images.go index 961ee0253f87..4e9ee1cd34ff 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -2292,8 +2292,8 @@ func pruneExpiredImages(ctx context.Context, s *state.State, op *operations.Oper } allImages = make(map[string][]dbCluster.Image, len(images)) - for i := range images { - allImages[images[i].Fingerprint] = append(allImages[images[i].Fingerprint], images[i]) + for _, image := range images { + allImages[image.Fingerprint] = append(allImages[image.Fingerprint], image) } return nil From 76c4998262ea379202d23eb6b0ebdf7fcc0448a1 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:50:31 +0100 Subject: [PATCH 240/543] shared: Remove BuildNameToCertificate. (*Config).BuildNameToCertificate has been deprecated since go1.14. We should be able to omit it. Signed-off-by: Mark Laing --- shared/network.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/shared/network.go b/shared/network.go index 9efa19f581d3..773f13d8e7db 100644 --- a/shared/network.go +++ b/shared/network.go @@ -95,8 +95,6 @@ func finalizeTLSConfig(tlsConfig *tls.Config, tlsRemoteCert *x509.Certificate) { tlsConfig.ServerName = tlsRemoteCert.DNSNames[0] } } - - tlsConfig.BuildNameToCertificate() } func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsClientCAFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) { From 5759e4e7f60c83d4b8d32850db8041e8e68fe98d Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:53:35 +0100 Subject: [PATCH 241/543] lxd/util: Remove BuildNameToCertificate. 
Signed-off-by: Mark Laing --- lxd/util/net.go | 1 - 1 file changed, 1 deletion(-) diff --git a/lxd/util/net.go b/lxd/util/net.go index aa5b1a33047d..10a46a167022 100644 --- a/lxd/util/net.go +++ b/lxd/util/net.go @@ -119,7 +119,6 @@ func ServerTLSConfig(cert *shared.CertInfo) *tls.Config { logger.Infof("LXD is in CA mode, only CA-signed certificates will be allowed") } - config.BuildNameToCertificate() return config } From 19302f098885bbc007625a5280ca7e9c7ecf48f2 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:54:45 +0100 Subject: [PATCH 242/543] shared/netutils: Move C code to top of file (gci). Signed-off-by: Mark Laing --- shared/netutils/network_linux_cgo.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/shared/netutils/network_linux_cgo.go b/shared/netutils/network_linux_cgo.go index 4c4612187941..23ca4fbf3312 100644 --- a/shared/netutils/network_linux_cgo.go +++ b/shared/netutils/network_linux_cgo.go @@ -2,6 +2,12 @@ package netutils +/* +#include "unixfd.h" +#include "netns_getifaddrs.c" +*/ +import "C" + import ( "fmt" "io" @@ -13,12 +19,6 @@ import ( "github.com/canonical/lxd/shared/api" ) -/* -#include "unixfd.h" -#include "netns_getifaddrs.c" -*/ -import "C" - // Allow the caller to set expectations. // UnixFdsAcceptExact will only succeed if the exact amount of fds has been From c5128a7b93a298ee001dc77552086bce41ed8b27 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:55:02 +0100 Subject: [PATCH 243/543] shared/linux: Move C code to top of file (gci). 
Signed-off-by: Mark Laing --- shared/linux/socket_linux_cgo.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/shared/linux/socket_linux_cgo.go b/shared/linux/socket_linux_cgo.go index cf9015812641..7705da6b1db3 100644 --- a/shared/linux/socket_linux_cgo.go +++ b/shared/linux/socket_linux_cgo.go @@ -2,15 +2,6 @@ package linux -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" - - _ "github.com/canonical/lxd/lxd/include" // Used by cgo -) - /* #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 @@ -54,6 +45,15 @@ again: */ import "C" +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" + + _ "github.com/canonical/lxd/lxd/include" // Used by cgo +) + const ABSTRACT_UNIX_SOCK_LEN int = C.ABSTRACT_UNIX_SOCK_LEN func ReadPid(r *os.File) int { From 51e40d2969b609de98ca5ec44f66f1fadfd52141 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:55:24 +0100 Subject: [PATCH 244/543] shared/idmap: Move C code to top of file (gci). Signed-off-by: Mark Laing --- shared/idmap/shift_linux.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/shared/idmap/shift_linux.go b/shared/idmap/shift_linux.go index 3951462fba4e..f0602c29dd56 100644 --- a/shared/idmap/shift_linux.go +++ b/shared/idmap/shift_linux.go @@ -2,19 +2,6 @@ package idmap -import ( - "fmt" - "os" - "os/exec" - "unsafe" - - "golang.org/x/sys/unix" - - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/logger" -) - // #cgo LDFLAGS: -lacl /* #ifndef _GNU_SOURCE @@ -353,6 +340,19 @@ static int create_detached_idmapped_mount(const char *path, const char *fstype) */ import "C" +import ( + "fmt" + "os" + "os/exec" + "unsafe" + + "golang.org/x/sys/unix" + + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/logger" +) + // ShiftOwner updates uid and gid for a file when entering/exiting a 
namespace func ShiftOwner(basepath string, path string, uid int, gid int) error { cbasepath := C.CString(basepath) From c31231a8791fdbc368cdb2754448a1b28d30c164 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:56:10 +0100 Subject: [PATCH 245/543] lxd/storage/quota: Move C code to top of file (gci). Signed-off-by: Mark Laing --- lxd/storage/quota/projectquota.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/lxd/storage/quota/projectquota.go b/lxd/storage/quota/projectquota.go index af86d8a99445..b0e4aac83703 100644 --- a/lxd/storage/quota/projectquota.go +++ b/lxd/storage/quota/projectquota.go @@ -1,18 +1,5 @@ package quota -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strings" - "unsafe" - - "golang.org/x/sys/unix" - - "github.com/canonical/lxd/shared" -) - /* #include #include @@ -165,6 +152,19 @@ int32_t quota_get_path(char *path) { */ import "C" +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/canonical/lxd/shared" +) + var errNoDevice = fmt.Errorf("Couldn't find backing device for mountpoint") func devForPath(path string) (string, error) { From 9d3beeb9f077dc8ace0bc50098fdcafae576392a Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:56:28 +0100 Subject: [PATCH 246/543] lxd/seccomp: Move C code to top of file (gci). 
Signed-off-by: Mark Laing Signed-off-by: Thomas Parrott --- lxd/seccomp/seccomp.go | 62 +++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/lxd/seccomp/seccomp.go b/lxd/seccomp/seccomp.go index 1e1ada837dec..a81f5c4474ff 100644 --- a/lxd/seccomp/seccomp.go +++ b/lxd/seccomp/seccomp.go @@ -2,37 +2,6 @@ package seccomp -import ( - "context" - "fmt" - "io" - "net" - "os" - "path" - "regexp" - "runtime" - "strconv" - "strings" - "unsafe" - - liblxc "github.com/lxc/go-lxc" - "golang.org/x/sys/unix" - - deviceConfig "github.com/canonical/lxd/lxd/device/config" - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/lxd/project" - "github.com/canonical/lxd/lxd/state" - "github.com/canonical/lxd/lxd/ucred" - "github.com/canonical/lxd/lxd/util" - "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/api" - "github.com/canonical/lxd/shared/idmap" - "github.com/canonical/lxd/shared/linux" - "github.com/canonical/lxd/shared/logger" - "github.com/canonical/lxd/shared/netutils" - "github.com/canonical/lxd/shared/osarch" -) - /* #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 @@ -469,6 +438,37 @@ static int handle_bpf_syscall(pid_t pid_target, int notify_fd, int mem_fd, */ import "C" +import ( + "context" + "fmt" + "io" + "net" + "os" + "path" + "regexp" + "runtime" + "strconv" + "strings" + "unsafe" + + liblxc "github.com/lxc/go-lxc" + "golang.org/x/sys/unix" + + deviceConfig "github.com/canonical/lxd/lxd/device/config" + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/lxd/project" + "github.com/canonical/lxd/lxd/state" + "github.com/canonical/lxd/lxd/ucred" + "github.com/canonical/lxd/lxd/util" + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/api" + "github.com/canonical/lxd/shared/idmap" + "github.com/canonical/lxd/shared/linux" + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/lxd/shared/netutils" + 
"github.com/canonical/lxd/shared/osarch" +) + const lxdSeccompNotifyMknod = C.LXD_SECCOMP_NOTIFY_MKNOD const lxdSeccompNotifyMknodat = C.LXD_SECCOMP_NOTIFY_MKNODAT const lxdSeccompNotifySetxattr = C.LXD_SECCOMP_NOTIFY_SETXATTR From 28f21b9584f865e73c1f84e3ab1b285325abaa7e Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 14:12:39 +0100 Subject: [PATCH 247/543] lxd/storage/backend/lxd: Fix call to VolumeDbCreate in CreateCustomVolumeFromISO Signed-off-by: Thomas Parrott --- lxd/storage/backend_lxd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 4ca55345b4d8..706fa02249f9 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -5606,7 +5606,7 @@ func (b *lxdBackend) CreateCustomVolumeFromISO(projectName string, volName strin } // Validate config and create database entry for new storage volume. - err = VolumeDBCreate(b, projectName, volName, "", vol.Type(), false, vol.Config(), time.Now(), time.Time{}, vol.ContentType(), true, true) + err = VolumeDBCreate(b, projectName, volName, "", vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType(), true, true) if err != nil { return fmt.Errorf("Failed creating database entry for custom volume: %w", err) } From 6817f97b1a9771c417af26c37d588760bb3867e1 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:56:56 +0100 Subject: [PATCH 248/543] lxd: Move C code to top of files (gci). 
Signed-off-by: Mark Laing --- lxd/devices.go | 44 ++++++++++++++++++------------------- lxd/main_checkfeature.go | 22 +++++++++---------- lxd/main_forkcoresched.go | 18 +++++++-------- lxd/main_forkexec.go | 18 +++++++-------- lxd/main_forkfile.go | 26 +++++++++++----------- lxd/main_forkmount.go | 18 +++++++-------- lxd/main_forknet.go | 24 ++++++++++---------- lxd/main_forkproxy.go | 46 +++++++++++++++++++-------------------- lxd/main_forksyscall.go | 18 +++++++-------- lxd/main_forkuevent.go | 12 +++++----- lxd/main_nsexec.go | 10 ++++----- 11 files changed, 128 insertions(+), 128 deletions(-) diff --git a/lxd/devices.go b/lxd/devices.go index cdd03e4b33b4..38e56240a6c9 100644 --- a/lxd/devices.go +++ b/lxd/devices.go @@ -1,27 +1,5 @@ package main -import ( - "fmt" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - - "golang.org/x/sys/unix" - - "github.com/canonical/lxd/lxd/cgroup" - "github.com/canonical/lxd/lxd/device" - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/lxd/instance" - "github.com/canonical/lxd/lxd/instance/instancetype" - "github.com/canonical/lxd/lxd/resources" - "github.com/canonical/lxd/lxd/state" - "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/logger" -) - /* #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 @@ -54,6 +32,28 @@ static int get_hidraw_devinfo(int fd, struct hidraw_devinfo *info) */ import "C" +import ( + "fmt" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "golang.org/x/sys/unix" + + "github.com/canonical/lxd/lxd/cgroup" + "github.com/canonical/lxd/lxd/device" + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/lxd/instance" + "github.com/canonical/lxd/lxd/instance/instancetype" + "github.com/canonical/lxd/lxd/resources" + "github.com/canonical/lxd/lxd/state" + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/logger" +) + type deviceTaskCPU struct { id int64 strId string diff --git 
a/lxd/main_checkfeature.go b/lxd/main_checkfeature.go index 30a41613ba7b..15ffa79b6238 100644 --- a/lxd/main_checkfeature.go +++ b/lxd/main_checkfeature.go @@ -1,16 +1,5 @@ package main -import ( - "os" - "runtime" - - "golang.org/x/sys/unix" - - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/logger" -) - /* #include "config.h" @@ -589,6 +578,17 @@ static bool kernel_supports_idmapped_mounts(void) */ import "C" +import ( + "os" + "runtime" + + "golang.org/x/sys/unix" + + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/logger" +) + func canUseNetnsGetifaddrs() bool { if !bool(C.is_empty_string(&C.errbuf[0])) { logger.Debugf("%s", C.GoString(&C.errbuf[0])) diff --git a/lxd/main_forkcoresched.go b/lxd/main_forkcoresched.go index f8e95819130e..5fc1df4a46a3 100644 --- a/lxd/main_forkcoresched.go +++ b/lxd/main_forkcoresched.go @@ -1,14 +1,5 @@ package main -import ( - "fmt" - - "github.com/spf13/cobra" - - // Used by cgo - _ "github.com/canonical/lxd/lxd/include" -) - /* #include "config.h" @@ -92,6 +83,15 @@ void forkcoresched(void) */ import "C" +import ( + "fmt" + + "github.com/spf13/cobra" + + // Used by cgo + _ "github.com/canonical/lxd/lxd/include" +) + type cmdForkcoresched struct { global *cmdGlobal } diff --git a/lxd/main_forkexec.go b/lxd/main_forkexec.go index d0401e84db87..2b76a226fb3f 100644 --- a/lxd/main_forkexec.go +++ b/lxd/main_forkexec.go @@ -1,14 +1,5 @@ package main -import ( - "fmt" - - "github.com/spf13/cobra" - - // Used by cgo - _ "github.com/canonical/lxd/lxd/include" -) - /* #include "config.h" @@ -336,6 +327,15 @@ void forkexec(void) */ import "C" +import ( + "fmt" + + "github.com/spf13/cobra" + + // Used by cgo + _ "github.com/canonical/lxd/lxd/include" +) + type cmdForkexec struct { global *cmdGlobal } diff --git a/lxd/main_forkfile.go b/lxd/main_forkfile.go index 
9f2ad77484ad..36873928f184 100644 --- a/lxd/main_forkfile.go +++ b/lxd/main_forkfile.go @@ -1,18 +1,5 @@ package main -import ( - "net" - "os" - "os/signal" - "strconv" - "sync" - "time" - - "github.com/pkg/sftp" - "github.com/spf13/cobra" - "golang.org/x/sys/unix" -) - /* #include "config.h" @@ -97,6 +84,19 @@ void forkfile(void) */ import "C" +import ( + "net" + "os" + "os/signal" + "strconv" + "sync" + "time" + + "github.com/pkg/sftp" + "github.com/spf13/cobra" + "golang.org/x/sys/unix" +) + type cmdForkfile struct { global *cmdGlobal } diff --git a/lxd/main_forkmount.go b/lxd/main_forkmount.go index 5ec88c53ff5e..d0cdcf81c81f 100644 --- a/lxd/main_forkmount.go +++ b/lxd/main_forkmount.go @@ -1,14 +1,5 @@ package main -import ( - "fmt" - - "github.com/spf13/cobra" - - // Used by cgo - _ "github.com/canonical/lxd/lxd/include" -) - /* #include "config.h" @@ -658,6 +649,15 @@ void forkmount(void) */ import "C" +import ( + "fmt" + + "github.com/spf13/cobra" + + // Used by cgo + _ "github.com/canonical/lxd/lxd/include" +) + type cmdForkmount struct { global *cmdGlobal } diff --git a/lxd/main_forknet.go b/lxd/main_forknet.go index 654b10f63203..fd2bc4b10b40 100644 --- a/lxd/main_forknet.go +++ b/lxd/main_forknet.go @@ -1,17 +1,5 @@ package main -import ( - "encoding/json" - "fmt" - "net" - - "github.com/spf13/cobra" - - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/lxd/ip" - "github.com/canonical/lxd/shared/netutils" -) - /* #include "config.h" @@ -114,6 +102,18 @@ void forknet(void) */ import "C" +import ( + "encoding/json" + "fmt" + "net" + + "github.com/spf13/cobra" + + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/lxd/ip" + "github.com/canonical/lxd/shared/netutils" +) + type cmdForknet struct { global *cmdGlobal } diff --git a/lxd/main_forkproxy.go b/lxd/main_forkproxy.go index 378dcc58f355..f2c6fd10fe3d 100644 --- a/lxd/main_forkproxy.go +++ b/lxd/main_forkproxy.go @@ -1,28 +1,5 @@ 
package main -import ( - "fmt" - "io" - "net" - "os" - "os/signal" - "strconv" - "strings" - "sync" - "time" - "unsafe" - - "github.com/spf13/cobra" - "golang.org/x/sys/unix" - - "github.com/canonical/lxd/lxd/daemon" - deviceConfig "github.com/canonical/lxd/lxd/device/config" - _ "github.com/canonical/lxd/lxd/include" // Used by cgo - "github.com/canonical/lxd/lxd/network" - "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/netutils" -) - /* #include "config.h" @@ -263,6 +240,29 @@ void forkproxy(void) */ import "C" +import ( + "fmt" + "io" + "net" + "os" + "os/signal" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + "github.com/spf13/cobra" + "golang.org/x/sys/unix" + + "github.com/canonical/lxd/lxd/daemon" + deviceConfig "github.com/canonical/lxd/lxd/device/config" + _ "github.com/canonical/lxd/lxd/include" // Used by cgo + "github.com/canonical/lxd/lxd/network" + "github.com/canonical/lxd/shared" + "github.com/canonical/lxd/shared/netutils" +) + const forkproxyUDSSockFDNum int = C.FORKPROXY_UDS_SOCK_FD_NUM type cmdForkproxy struct { diff --git a/lxd/main_forksyscall.go b/lxd/main_forksyscall.go index e0dde3f88e89..92223fd5a9f9 100644 --- a/lxd/main_forksyscall.go +++ b/lxd/main_forksyscall.go @@ -1,14 +1,5 @@ package main -import ( - "fmt" - - "github.com/spf13/cobra" - - // Used by cgo - _ "github.com/canonical/lxd/lxd/include" -) - /* #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 @@ -662,6 +653,15 @@ void forksyscall(void) */ import "C" +import ( + "fmt" + + "github.com/spf13/cobra" + + // Used by cgo + _ "github.com/canonical/lxd/lxd/include" +) + type cmdForksyscall struct { global *cmdGlobal } diff --git a/lxd/main_forkuevent.go b/lxd/main_forkuevent.go index 43d40368347f..0f586668eb04 100644 --- a/lxd/main_forkuevent.go +++ b/lxd/main_forkuevent.go @@ -1,11 +1,5 @@ package main -import ( - "fmt" - - "github.com/spf13/cobra" -) - /* #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 @@ -220,6 +214,12 @@ void forkuevent(void) */ import "C" 
+import ( + "fmt" + + "github.com/spf13/cobra" +) + type cmdForkuevent struct { global *cmdGlobal } diff --git a/lxd/main_nsexec.go b/lxd/main_nsexec.go index 03a242493df2..1df92717e385 100644 --- a/lxd/main_nsexec.go +++ b/lxd/main_nsexec.go @@ -18,11 +18,6 @@ */ package main -import ( - // Used by cgo - _ "github.com/canonical/lxd/lxd/include" -) - /* #include "config.h" @@ -362,3 +357,8 @@ __attribute__((constructor)) void init(void) { } */ import "C" + +import ( + // Used by cgo + _ "github.com/canonical/lxd/lxd/include" +) From ef223660bb6ef0e09065f98f28df994fb48bea27 Mon Sep 17 00:00:00 2001 From: Mark Laing Date: Fri, 14 Jul 2023 11:57:12 +0100 Subject: [PATCH 249/543] lxc-to-lxd: Move C code to top of file (gci). Signed-off-by: Mark Laing --- lxc-to-lxd/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lxc-to-lxd/main.go b/lxc-to-lxd/main.go index fc73b3001869..5faeebc3a80a 100644 --- a/lxc-to-lxd/main.go +++ b/lxc-to-lxd/main.go @@ -42,6 +42,7 @@ __attribute__((constructor)) void init(void) { } */ import "C" + import ( "os" From 09ca3c68b581dfd69c5bcf65c20040e013690008 Mon Sep 17 00:00:00 2001 From: Din Music Date: Thu, 13 Jul 2023 14:16:39 +0200 Subject: [PATCH 250/543] lxd/api_internal_recover: Skip unsupported storage drivers during recovery Signed-off-by: Din Music --- lxd/api_internal_recover.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lxd/api_internal_recover.go b/lxd/api_internal_recover.go index 77bea1e9f7aa..6c80e4bfbbb8 100644 --- a/lxd/api_internal_recover.go +++ b/lxd/api_internal_recover.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "net/http" @@ -19,6 +20,7 @@ import ( "github.com/canonical/lxd/lxd/revert" "github.com/canonical/lxd/lxd/state" storagePools "github.com/canonical/lxd/lxd/storage" + storageDrivers "github.com/canonical/lxd/lxd/storage/drivers" "github.com/canonical/lxd/shared" "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/logger" @@ -221,6 
+223,10 @@ func internalRecoverScan(s *state.State, userPools []api.StoragePoolsPost, valid // Get list of unknown volumes on pool. poolProjectVols, err := pool.ListUnknownVolumes(nil) if err != nil { + if errors.Is(err, storageDrivers.ErrNotSupported) { + continue // Ignore unsupported storage drivers. + } + return response.SmartError(fmt.Errorf("Failed checking volumes on pool %q: %w", pool.Name(), err)) } From 8171f6b505c2e7945587fb11f5925a15cbc3122b Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 17 Jul 2023 07:49:22 +0200 Subject: [PATCH 251/543] doc: Use JSON object as term instead of dict Signed-off-by: Christopher Bartz --- doc/api-extensions.md | 2 +- doc/dev-lxd.md | 6 +++--- doc/rest-api.md | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/api-extensions.md b/doc/api-extensions.md index b03f77ce8df8..3d13dff6214b 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -765,7 +765,7 @@ and `<@hourly> <@daily> <@midnight> <@weekly> <@monthly> <@annually> <@yearly>` ## `container_copy_project` -Introduces a `project` field to the container source dict, allowing for +Introduces a `project` field to the container source JSON object, allowing for copy/move of containers between projects. ## `clustering_server_address` diff --git a/doc/dev-lxd.md b/doc/dev-lxd.md index 95d7af5a8f58..bf0db5d74341 100644 --- a/doc/dev-lxd.md +++ b/doc/dev-lxd.md @@ -79,7 +79,7 @@ Return value: ##### GET * Description: Information about the 1.0 API -* Return: dict +* Return: JSON object Return value: @@ -127,7 +127,7 @@ Return value: ##### GET * Description: Map of instance devices -* Return: dict +* Return: JSON object Return value: @@ -162,7 +162,7 @@ The notification types are: * `config` (changes to any of the `user.*` configuration keys) * `device` (any device addition, change or removal) -This never returns. Each notification is sent as a separate JSON dict: +This never returns. 
Each notification is sent as a separate JSON object: ```json { diff --git a/doc/rest-api.md b/doc/rest-api.md index 79d39adc40fe..3d0a8938b9b6 100644 --- a/doc/rest-api.md +++ b/doc/rest-api.md @@ -34,7 +34,7 @@ There are three standard return types: ### Standard return value -For a standard synchronous operation, the following dict is returned: +For a standard synchronous operation, the following JSON object is returned: ```js { @@ -52,7 +52,7 @@ HTTP code must be 200. When a request results in a background operation, the HTTP code is set to 202 (Accepted) and the Location HTTP header is set to the operation URL. -The body is a dict with the following structure: +The body is a JSON object with the following structure: ```js { @@ -161,7 +161,7 @@ A `recursion` argument can be passed to a GET query against a collection. The default value is 0 which means that collection member URLs are returned. Setting it to 1 will have those URLs be replaced by the object -they point to (typically a dict). +they point to (typically another JSON object). Recursion is implemented by simply replacing any pointer to an job (URL) by the object itself. 
From c28601c8f116fc8f60896fb43cb12383844e7b0c Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 17 Jul 2023 08:18:26 +0200 Subject: [PATCH 252/543] shared/api: Use JSON object as term instead of dict Signed-off-by: Christopher Bartz --- shared/api/instance_state.go | 4 ++-- shared/api/resource.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/shared/api/instance_state.go b/shared/api/instance_state.go index cac72b983404..bb8e6bb81b6c 100644 --- a/shared/api/instance_state.go +++ b/shared/api/instance_state.go @@ -37,13 +37,13 @@ type InstanceState struct { // Example: 101 StatusCode StatusCode `json:"status_code" yaml:"status_code"` - // Dict of disk usage + // Disk usage key/value pairs Disk map[string]InstanceStateDisk `json:"disk" yaml:"disk"` // Memory usage information Memory InstanceStateMemory `json:"memory" yaml:"memory"` - // Dict of network usage + // Network usage key/value pairs Network map[string]InstanceStateNetwork `json:"network" yaml:"network"` // PID of the runtime diff --git a/shared/api/resource.go b/shared/api/resource.go index 0ccd186fb155..e98e0249058e 100644 --- a/shared/api/resource.go +++ b/shared/api/resource.go @@ -951,7 +951,7 @@ type ResourcesPCIVPD struct { // Example: HP Ethernet 1Gb 4-port 331i Adapter ProductName string `json:"product_name,omitempty" yaml:"product_name,omitempty"` - // Dict of vendor provided key/value pairs. + // Vendor provided key/value pairs. 
// Example: {"EC": ""A-5545", "MN": "103C", "V0": "5W PCIeGen2"} Entries map[string]string `json:"entries,omitempty" yaml:"entries,omitempty"` } From dc728fe5207dad66df736a13d7b403aef40b6d63 Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 17 Jul 2023 08:25:44 +0200 Subject: [PATCH 253/543] lxd/operations: Use JSON object as term instead of dict Signed-off-by: Christopher Bartz --- lxd/operations.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lxd/operations.go b/lxd/operations.go index a80b5201024c..6958d1dcf537 100644 --- a/lxd/operations.go +++ b/lxd/operations.go @@ -368,7 +368,7 @@ func operationCancel(s *state.State, r *http.Request, projectName string, op *ap // // Get the operations // -// Returns a dict of operation type to operation list (URLs). +// Returns a JSON object of operation type to operation list (URLs). // // --- // produces: @@ -398,7 +398,7 @@ func operationCancel(s *state.State, r *http.Request, projectName string, op *ap // type: array // items: // type: string -// description: Dict of operation types to operation URLs +// description: JSON object of operation types to operation URLs // example: |- // { // "running": [ From a623f9b07ba1deb85bb8ebf561e5282719822c0b Mon Sep 17 00:00:00 2001 From: taltrums Date: Mon, 10 Jul 2023 17:07:45 +0530 Subject: [PATCH 254/543] lxc/cluster_group: Added lxc cluster group add Signed-off-by: taltrums --- lxc/cluster_group.go | 65 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/lxc/cluster_group.go b/lxc/cluster_group.go index 5c6e93d6311d..c631f77b785e 100644 --- a/lxc/cluster_group.go +++ b/lxc/cluster_group.go @@ -62,6 +62,10 @@ func (c *cmdClusterGroup) Command() *cobra.Command { clusterGroupShowCmd := cmdClusterGroupShow{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupShowCmd.Command()) + // Add + clusterGroupAddCmd := cmdClusterGroupAdd{global: c.global, cluster: c.cluster} + 
cmd.AddCommand(clusterGroupAddCmd.Command()) + return cmd } @@ -613,3 +617,64 @@ func (c *cmdClusterGroupShow) Run(cmd *cobra.Command, args []string) error { return nil } + +// Add. +type cmdClusterGroupAdd struct { + global *cmdGlobal + cluster *cmdCluster +} + +func (c *cmdClusterGroupAdd) Command() *cobra.Command { + cmd := &cobra.Command{} + cmd.Use = usage("add", i18n.G("[:] ")) + cmd.Short = i18n.G("Add member to group") + cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( + `Add a cluster member to a cluster group`)) + + cmd.RunE = c.Run + + return cmd +} + +func (c *cmdClusterGroupAdd) Run(cmd *cobra.Command, args []string) error { + // Quick checks. + exit, err := c.global.CheckArgs(cmd, args, 2, 2) + if exit { + return err + } + + // Parse remote + resources, err := c.global.ParseServers(args[0]) + if err != nil { + return err + } + + resource := resources[0] + + if resource.name == "" { + return fmt.Errorf(i18n.G("Missing cluster member name")) + } + + // Retrieve cluster member information. 
+ member, etag, err := resource.server.GetClusterMember(resource.name) + if err != nil { + return err + } + + if shared.StringInSlice(args[1], member.Groups) { + return fmt.Errorf(i18n.G("Cluster member %s is already in group %s"), resource.name, args[1]) + } + + member.Groups = append(member.Groups, args[1]) + + err = resource.server.UpdateClusterMember(resource.name, member.Writable(), etag) + if err != nil { + return err + } + + if !c.global.flagQuiet { + fmt.Printf(i18n.G("Cluster member %s added to group %s")+"\n", resource.name, args[1]) + } + + return nil +} From 8f403eecca31d172f301df0b66901680eaf2e970 Mon Sep 17 00:00:00 2001 From: taltrums Date: Wed, 12 Jul 2023 11:29:54 +0530 Subject: [PATCH 255/543] doc: Added lxc cluster group add Signed-off-by: taltrums doc: Fix lxc cluster group add command Signed-off-by: taltrums --- doc/howto/cluster_groups.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/howto/cluster_groups.md b/doc/howto/cluster_groups.md index 51251a4f1b4f..cb5675b4553a 100644 --- a/doc/howto/cluster_groups.md +++ b/doc/howto/cluster_groups.md @@ -27,6 +27,12 @@ To assign `server1` to the `gpu` group and also keep it in the `default` group, lxc cluster group assign server1 default,gpu +To add a cluster member to a specific group without removing it from other groups, use the `lxc cluster group add` command. + +For example, to add `server1` to the `gpu` group and also keep it in the `default` group, use the following command: + + lxc cluster group add server1 gpu + ## Launch an instance on a cluster group member With cluster groups, you can target an instance to run on one of the members of the cluster group, instead of targeting it to run on a specific member. 
From 018d1971a41ee60d7f1aec744f8bb3823911c01c Mon Sep 17 00:00:00 2001 From: taltrums Date: Fri, 14 Jul 2023 18:56:23 +0530 Subject: [PATCH 256/543] test: Added tests for lxc cluster group add Signed-off-by: taltrums test: Fix test for lxc cluster group add Signed-off-by: taltrums --- test/suites/clustering.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh index e532e9127848..2896c053faa3 100644 --- a/test/suites/clustering.sh +++ b/test/suites/clustering.sh @@ -3327,6 +3327,20 @@ test_clustering_groups() { lxc cluster group create cluster:foobar2 lxc cluster group assign cluster:node3 default,foobar2 + # Create a new group "newgroup" + lxc cluster group create cluster:newgroup + [ "$(lxc query cluster:/1.0/cluster/groups/newgroup | jq '.members | length')" -eq 0 ] + + # Add node1 to the "newgroup" group + lxc cluster group add cluster:node1 newgroup + [ "$(lxc query cluster:/1.0/cluster/members/node1 | jq 'any(.groups[] == "newgroup"; .)')" = "true" ] + + # remove node1 from "newgroup" + lxc cluster group remove cluster:node1 newgroup + + # delete cluster group "newgroup" + lxc cluster group delete cluster:newgroup + # With these settings: # - node1 will receive instances unless a different node is directly targeted (not via group) # - node2 will receive instances if either targeted by group or directly From 20e5607db1acf7d07663e5329c27651a64e5c3a0 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 7 Jul 2023 11:46:33 +0200 Subject: [PATCH 257/543] lxc: Add `stringTo{ Type }HookFunc` utils func Signed-off-by: Gabriel Mougard --- lxc/utils_properties.go | 189 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 lxc/utils_properties.go diff --git a/lxc/utils_properties.go b/lxc/utils_properties.go new file mode 100644 index 000000000000..0c65fb66c55a --- /dev/null +++ b/lxc/utils_properties.go @@ -0,0 +1,189 @@ +package main + +import ( + "fmt" + 
"reflect" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + + "github.com/canonical/lxd/shared/i18n" +) + +// stringToTimeHookFunc is a custom decoding hook that converts string values to time.Time using the given layout. +func stringToTimeHookFunc(layout string) mapstructure.DecodeHookFuncType { + return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() == reflect.String && to == reflect.TypeOf(time.Time{}) { + strValue := data.(string) + t, err := time.Parse(layout, strValue) + if err != nil { + return nil, err + } + + return t, nil + } + + return data, nil + } +} + +// stringToBoolHookFunc is a custom decoding hook that converts string values to bool. +func stringToBoolHookFunc() mapstructure.DecodeHookFunc { + return func(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Bool { + return data, nil + } + + str := data.(string) + str = strings.ToLower(str) + switch str { + case "1", "t", "true": + return true, nil + case "0", "f", "false": + return false, nil + default: + return false, fmt.Errorf("Invalid boolean value: %s", str) + } + } +} + +// stringToIntHookFunc is a custom decoding hook that converts string values to int. +func stringToIntHookFunc() mapstructure.DecodeHookFunc { + return func(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) { + if f != reflect.String || (t != reflect.Int && t != reflect.Int8 && t != reflect.Int16 && t != reflect.Int32 && t != reflect.Int64) { + return data, nil + } + + str := data.(string) + value, err := strconv.Atoi(str) + if err != nil { + return data, err + } + + return value, nil + } +} + +// stringToFloatHookFunc is a custom decoding hook that converts string values to float. 
+func stringToFloatHookFunc() mapstructure.DecodeHookFunc { + return func(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) { + if f != reflect.String || (t != reflect.Float32 && t != reflect.Float64) { + return data, nil + } + + str := data.(string) + value, err := strconv.ParseFloat(str, 64) + if err != nil { + return data, err + } + + return value, nil + } +} + +// getFieldByJsonTag gets the value of a struct field by its JSON tag. +func getFieldByJsonTag(obj any, tag string) (any, error) { + var res any + ok := false + v := reflect.ValueOf(obj).Elem() + for i := 0; i < v.NumField(); i++ { + jsonTag := v.Type().Field(i).Tag.Get("json") + + // Ignore any options that might be specified after a comma in the tag. + commaIdx := strings.Index(jsonTag, ",") + if commaIdx > 0 { + jsonTag = jsonTag[:commaIdx] + } + + if strings.EqualFold(jsonTag, tag) { + res = v.Field(i).Interface() + ok = true + } + } + + if !ok { + return nil, fmt.Errorf("The property with tag %q does not exist", tag) + } + + return res, nil +} + +// setFieldByJsonTag sets the value of a struct field by its JSON tag. +func setFieldByJsonTag(obj any, tag string, value any) { + v := reflect.ValueOf(obj).Elem() + var fieldName string + + for i := 0; i < v.NumField(); i++ { + jsonTag := v.Type().Field(i).Tag.Get("json") + commaIdx := strings.Index(jsonTag, ",") + if commaIdx > 0 { + jsonTag = jsonTag[:commaIdx] + } + + if strings.EqualFold(jsonTag, tag) { + fieldName = v.Type().Field(i).Name + } + } + + if fieldName != "" { + if v.FieldByName(fieldName).CanSet() { + v.FieldByName(fieldName).Set(reflect.ValueOf(value)) + } + } +} + +// unsetFieldByJsonTag unsets (give a default value) the value of a struct field by its JSON tag. 
+func unsetFieldByJsonTag(obj any, tag string) error { + v, err := getFieldByJsonTag(obj, tag) + if err != nil { + return err + } + + switch v.(type) { + case string: + setFieldByJsonTag(obj, tag, "") + case int: + setFieldByJsonTag(obj, tag, 0) + case bool: + setFieldByJsonTag(obj, tag, false) + case float32, float64: + setFieldByJsonTag(obj, tag, 0.0) + case time.Time: + setFieldByJsonTag(obj, tag, time.Time{}) + case *time.Time: + setFieldByJsonTag(obj, tag, &time.Time{}) + } + + return nil +} + +// unpackKVToWritable unpacks a map[string]string into a writable API struct. +func unpackKVToWritable(writable any, keys map[string]string) error { + data := make(map[string]any) + for k, v := range keys { + data[k] = v + } + + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + TagName: "json", + Result: writable, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + stringToBoolHookFunc(), + stringToIntHookFunc(), + stringToFloatHookFunc(), + stringToTimeHookFunc(time.RFC3339), + ), + }) + if err != nil { + return fmt.Errorf(i18n.G("Error creating decoder: %v"), err) + } + + err = decoder.Decode(data) + if err != nil { + return fmt.Errorf(i18n.G("Error decoding data: %v"), err) + } + + return nil +} From 455dec097e001b1b62c66cd5f35d17b81d83796b Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 17:49:32 +0200 Subject: [PATCH 258/543] test: Add unit tests for `utils_properties.go` Signed-off-by: Gabriel Mougard --- lxc/utils_properties_test.go | 173 +++++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 lxc/utils_properties_test.go diff --git a/lxc/utils_properties_test.go b/lxc/utils_properties_test.go new file mode 100644 index 000000000000..03e3c88dde39 --- /dev/null +++ b/lxc/utils_properties_test.go @@ -0,0 +1,173 @@ +package main + +import ( + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/suite" +) + +type utilsPropertiesTestSuite struct { + suite.Suite +} + +func 
TestUtilsPropertiesTestSuite(t *testing.T) { + suite.Run(t, new(utilsPropertiesTestSuite)) +} + +func (s *utilsPropertiesTestSuite) TestStringToTimeHookFuncValidData() { + layout := time.RFC3339 + hook := stringToTimeHookFunc(layout) + + result, err := hook(reflect.TypeOf(""), reflect.TypeOf(time.Time{}), "2023-07-12T07:34:00Z") + s.NoError(err) + s.Equal(time.Date(2023, 7, 12, 7, 34, 0, 0, time.UTC), result) +} + +func (s *utilsPropertiesTestSuite) TestStringToTimeHookFuncInvalidData() { + layout := time.RFC3339 + hook := stringToTimeHookFunc(layout) + + _, err := hook(reflect.TypeOf(""), reflect.TypeOf(time.Time{}), "not a time") + s.Error(err, "Expected an error but got nil") +} + +func (s *utilsPropertiesTestSuite) TestStringToBoolHookFuncValidData() { + hookFunc := stringToBoolHookFunc() + hook := hookFunc.(func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)) + + result, err := hook(reflect.String, reflect.Bool, "t") + s.NoError(err) + s.Equal(true, result) +} + +func (s *utilsPropertiesTestSuite) TestStringToBoolHookFuncInvalidData() { + hookFunc := stringToBoolHookFunc() + hook := hookFunc.(func(reflect.Kind, reflect.Kind, any) (any, error)) + + _, err := hook(reflect.String, reflect.Bool, "not a boolean") + s.Error(err, "Expected an error but got nil") +} + +func (s *utilsPropertiesTestSuite) TestStringToIntHookFuncValidData() { + hookFunc := stringToIntHookFunc() + hook := hookFunc.(func(reflect.Kind, reflect.Kind, any) (any, error)) + + result, err := hook(reflect.String, reflect.Int, "123") + s.NoError(err) + s.Equal(123, result) +} + +func (s *utilsPropertiesTestSuite) TestStringToIntHookFuncInvalidData() { + hookFunc := stringToIntHookFunc() + hook := hookFunc.(func(reflect.Kind, reflect.Kind, any) (any, error)) + + _, err := hook(reflect.String, reflect.Int, "not an int") + s.Error(err, "Expected an error but got nil") +} + +func (s *utilsPropertiesTestSuite) TestStringToFloatHookFuncValidData() { + hookFunc := stringToFloatHookFunc() + 
hook := hookFunc.(func(reflect.Kind, reflect.Kind, any) (any, error)) + + result, err := hook(reflect.String, reflect.Float64, "123.45") + s.NoError(err) + s.Equal(123.45, result) +} + +func (s *utilsPropertiesTestSuite) TestStringToFloatHookFuncInvalidData() { + hookFunc := stringToFloatHookFunc() + hook := hookFunc.(func(reflect.Kind, reflect.Kind, any) (any, error)) + + _, err := hook(reflect.String, reflect.Float64, "not a float") + s.Error(err, "Expected an error but got nil") +} + +type testStruct struct { + Name string `json:"name"` + Age int `json:"age"` +} + +func (s *utilsPropertiesTestSuite) TestSetFieldByJsonTagSettable() { + ts := testStruct{ + Name: "John Doe", + Age: 30, + } + + setFieldByJsonTag(&ts, "name", "Jane Doe") + s.Equal("Jane Doe", ts.Name) +} + +func (s *utilsPropertiesTestSuite) TestSetFieldByJsonTagNonSettable() { + ts := testStruct{ + Name: "John Doe", + Age: 30, + } + + setFieldByJsonTag(&ts, "invalid name", "Jane Doe") + s.NotEqual(ts.Name, "Jane Doe") +} + +func (s *utilsPropertiesTestSuite) TestUnsetFieldByJsonTagValid() { + ts := testStruct{ + Name: "John Doe", + Age: 30, + } + + err := unsetFieldByJsonTag(&ts, "name") + s.NoError(err) + s.Equal("", ts.Name) +} + +func (s *utilsPropertiesTestSuite) TestUnsetFieldByJsonTagInvalid() { + ts := testStruct{ + Name: "John Doe", + Age: 30, + } + + err := unsetFieldByJsonTag(&ts, "invalid") + s.Error(err, "Expected an error but got nil") +} + +type writableStruct struct { + Name string `json:"name"` + Age int `json:"age"` + Score float64 `json:"score"` + Alive bool `json:"alive"` + Birth time.Time `json:"birth"` +} + +func (s *utilsPropertiesTestSuite) TestUnpackKVToWritable() { + ws := &writableStruct{} + keys := map[string]string{ + "name": "John Doe", + "age": "30", + "score": "85.5", + "alive": "true", + "birth": "2000-01-01T00:00:00Z", + } + + err := unpackKVToWritable(ws, keys) + s.NoError(err) + + s.Equal("John Doe", ws.Name) + s.Equal(30, ws.Age) + s.Equal(85.5, ws.Score) + 
s.Equal(true, ws.Alive) + s.Equal("2000-01-01T00:00:00Z", ws.Birth.Format(time.RFC3339)) +} + +func (s *utilsPropertiesTestSuite) TestUnpackKVToWritableInvalidData() { + ws := &writableStruct{} + keys := map[string]string{ + "name": "John Doe", + "age": "not an int", + "score": "not a float", + "alive": "not a bool", + "birth": "not a time", + } + + err := unpackKVToWritable(ws, keys) + s.Error(err, "Expected an error but got nil") +} From ecf9020c2c61b88431a8c7a10f3c4079e4727a0a Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:38:10 +0200 Subject: [PATCH 259/543] lxc: Update `get/set/unset` command for `cluster` properties Signed-off-by: Gabriel Mougard --- lxc/cluster.go | 49 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/lxc/cluster.go b/lxc/cluster.go index 39bd94f03739..b42ac1b5e2f2 100644 --- a/lxc/cluster.go +++ b/lxc/cluster.go @@ -242,6 +242,8 @@ func (c *cmdClusterShow) Run(cmd *cobra.Command, args []string) error { type cmdClusterGet struct { global *cmdGlobal cluster *cmdCluster + + flagIsProperty bool } func (c *cmdClusterGet) Command() *cobra.Command { @@ -250,6 +252,7 @@ func (c *cmdClusterGet) Command() *cobra.Command { cmd.Short = i18n.G("Get values for cluster member configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a cluster property")) cmd.RunE = c.Run return cmd @@ -276,6 +279,17 @@ func (c *cmdClusterGet) Run(cmd *cobra.Command, args []string) error { return err } + if c.flagIsProperty { + w := member.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the cluster member %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + return nil + } + value, ok := member.Config[args[1]] if !ok { return fmt.Errorf(i18n.G("The key %q does not 
exist on cluster member %q"), args[1], resource.name) @@ -289,6 +303,8 @@ func (c *cmdClusterGet) Run(cmd *cobra.Command, args []string) error { type cmdClusterSet struct { global *cmdGlobal cluster *cmdCluster + + flagIsProperty bool } func (c *cmdClusterSet) Command() *cobra.Command { @@ -297,6 +313,7 @@ func (c *cmdClusterSet) Command() *cobra.Command { cmd.Short = i18n.G("Set a cluster member's configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a cluster property")) cmd.RunE = c.Run return cmd @@ -323,17 +340,34 @@ func (c *cmdClusterSet) Run(cmd *cobra.Command, args []string) error { return err } - // Get the new config entries - entries, err := getConfig(args[1:]...) + // Get the new config keys + keys, err := getConfig(args[1:]...) if err != nil { return err } - for k, v := range entries { - member.Config[k] = v + writable := member.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateClusterMember(resource.name, member.Writable(), "") + return resource.server.UpdateClusterMember(resource.name, writable, "") } // Unset. 
@@ -341,6 +375,8 @@ type cmdClusterUnset struct { global *cmdGlobal cluster *cmdCluster clusterSet *cmdClusterSet + + flagIsProperty bool } func (c *cmdClusterUnset) Command() *cobra.Command { @@ -349,6 +385,7 @@ func (c *cmdClusterUnset) Command() *cobra.Command { cmd.Short = i18n.G("Unset a cluster member's configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a cluster property")) cmd.RunE = c.Run return cmd @@ -361,6 +398,8 @@ func (c *cmdClusterUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.clusterSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.clusterSet.Run(cmd, args) } From 59afadbc36540dd0d30f9cae86200fbb828cd5f3 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:38:52 +0200 Subject: [PATCH 260/543] lxc: Update `get/set/unset` command for `instance` properties Signed-off-by: Gabriel Mougard --- lxc/config.go | 122 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 110 insertions(+), 12 deletions(-) diff --git a/lxc/config.go b/lxc/config.go index cbc84a3cc9b0..55af0bf443fc 100644 --- a/lxc/config.go +++ b/lxc/config.go @@ -367,7 +367,8 @@ type cmdConfigGet struct { global *cmdGlobal config *cmdConfig - flagExpanded bool + flagExpanded bool + flagIsProperty bool } // Command creates a Cobra command to fetch values for given instance or server configuration keys, @@ -380,6 +381,7 @@ func (c *cmdConfigGet) Command() *cobra.Command { `Get values for instance or server configuration keys`)) cmd.Flags().BoolVarP(&c.flagExpanded, "expanded", "e", false, i18n.G("Access the expanded configuration")) + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as an instance property")) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run @@ -406,6 +408,8 @@ func (c *cmdConfigGet) 
Run(cmd *cobra.Command, args []string) error { } resource := resources[0] + fields := strings.SplitN(resource.name, "/", 2) + isSnapshot := len(fields) == 2 // Get the config key if resource.name != "" { @@ -414,15 +418,49 @@ func (c *cmdConfigGet) Run(cmd *cobra.Command, args []string) error { return fmt.Errorf(i18n.G("--target cannot be used with instances")) } + if isSnapshot { + inst, _, err := resource.server.GetInstanceSnapshot(fields[0], fields[1]) + if err != nil { + return err + } + + if c.flagIsProperty { + res, err := getFieldByJsonTag(inst, args[len(args)-1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the instance snapshot %s/%s: %v"), args[len(args)-1], fields[0], fields[1], err) + } + + fmt.Printf("%v\n", res) + } else { + if c.flagExpanded { + fmt.Println(inst.ExpandedConfig[args[len(args)-1]]) + } else { + fmt.Println(inst.Config[args[len(args)-1]]) + } + } + + return nil + } + resp, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } - if c.flagExpanded { - fmt.Println(resp.ExpandedConfig[args[len(args)-1]]) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[len(args)-1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the instance %q: %v"), args[len(args)-1], resource.name, err) + } + + fmt.Printf("%v\n", res) } else { - fmt.Println(resp.Config[args[len(args)-1]]) + if c.flagExpanded { + fmt.Println(resp.ExpandedConfig[args[len(args)-1]]) + } else { + fmt.Println(resp.Config[args[len(args)-1]]) + } } } else { // Quick check. @@ -463,6 +501,8 @@ func (c *cmdConfigGet) Run(cmd *cobra.Command, args []string) error { type cmdConfigSet struct { global *cmdGlobal config *cmdConfig + + flagIsProperty bool } // Command creates a new Cobra command to set instance or server configuration keys and returns it. 
@@ -486,6 +526,7 @@ lxc config set core.trust_password=blah Will set the server's trust password to blah.`)) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as an instance property")) cmd.RunE = c.Run return cmd @@ -543,6 +584,8 @@ func (c *cmdConfigSet) Run(cmd *cobra.Command, args []string) error { } resource := resources[0] + fields := strings.SplitN(resource.name, "/", 2) + isSnapshot := len(fields) == 2 // Set the config keys if resource.name != "" { @@ -556,25 +599,75 @@ func (c *cmdConfigSet) Run(cmd *cobra.Command, args []string) error { return err } + if isSnapshot { + inst, etag, err := resource.server.GetInstanceSnapshot(fields[0], fields[1]) + if err != nil { + return err + } + + writable := inst.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting properties: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + + op, err := resource.server.UpdateInstanceSnapshot(fields[0], fields[1], writable, etag) + if err != nil { + return err + } + + return op.Wait() + } else { + return fmt.Errorf(i18n.G("The is no config key to set on an instance snapshot.")) + } + } + inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } - for k, v := range keys { + writable := inst.Writable() + if c.flagIsProperty { if cmd.Name() == "unset" { - _, ok := inst.Config[k] - if !ok { - return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set"), k) + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting properties: %v"), err) + } } - - delete(inst.Config, k) } else { - inst.Config[k] 
= v + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + if cmd.Name() == "unset" { + _, ok := writable.Config[k] + if !ok { + return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set"), k) + } + + delete(writable.Config, k) + } else { + writable.Config[k] = v + } } } - op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag) + op, err := resource.server.UpdateInstance(resource.name, writable, etag) if err != nil { return err } @@ -749,6 +842,8 @@ type cmdConfigUnset struct { global *cmdGlobal config *cmdConfig configSet *cmdConfigSet + + flagIsProperty bool } // Command generates a new "unset" command to remove specific configuration keys for an instance or server. @@ -760,6 +855,7 @@ func (c *cmdConfigUnset) Command() *cobra.Command { `Unset instance or server configuration keys`)) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as an instance property")) cmd.RunE = c.Run return cmd @@ -773,6 +869,8 @@ func (c *cmdConfigUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.configSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.configSet.Run(cmd, args) } From 9b046b97b963251cce4cb9ce178ee51ed7eb69b0 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:39:12 +0200 Subject: [PATCH 261/543] lxc: Update `get/set/unset` command for `network ACL` properties Signed-off-by: Gabriel Mougard --- lxc/network_acl.go | 51 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/lxc/network_acl.go b/lxc/network_acl.go index c65c79865641..9df7475a95b4 100644 --- a/lxc/network_acl.go +++ b/lxc/network_acl.go @@ -255,6 +255,8 @@ func (c *cmdNetworkACLShowLog) Run(cmd 
*cobra.Command, args []string) error { type cmdNetworkACLGet struct { global *cmdGlobal networkACL *cmdNetworkACL + + flagIsProperty bool } func (c *cmdNetworkACLGet) Command() *cobra.Command { @@ -262,6 +264,8 @@ func (c *cmdNetworkACLGet) Command() *cobra.Command { cmd.Use = usage("get", i18n.G("[:] ")) cmd.Short = i18n.G("Get values for network ACL configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Get values for network ACL configuration keys")) + + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network ACL property")) cmd.RunE = c.Run return cmd @@ -291,9 +295,19 @@ func (c *cmdNetworkACLGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == args[1] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network ACL %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range resp.Config { + if k == args[1] { + fmt.Printf("%s\n", v) + } } } @@ -387,6 +401,8 @@ func (c *cmdNetworkACLCreate) Run(cmd *cobra.Command, args []string) error { type cmdNetworkACLSet struct { global *cmdGlobal networkACL *cmdNetworkACL + + flagIsProperty bool } func (c *cmdNetworkACLSet) Command() *cobra.Command { @@ -399,6 +415,7 @@ func (c *cmdNetworkACLSet) Command() *cobra.Command { For backward compatibility, a single configuration key may still be set with: lxc network set [:] `)) + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network ACL property")) cmd.RunE = c.Run return cmd @@ -435,11 +452,28 @@ func (c *cmdNetworkACLSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - netACL.Config[k] = v + writable := netACL.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := 
range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateNetworkACL(resource.name, netACL.Writable(), etag) + return resource.server.UpdateNetworkACL(resource.name, writable, etag) } // Unset. @@ -447,6 +481,8 @@ type cmdNetworkACLUnset struct { global *cmdGlobal networkACL *cmdNetworkACL networkACLSet *cmdNetworkACLSet + + flagIsProperty bool } func (c *cmdNetworkACLUnset) Command() *cobra.Command { @@ -456,6 +492,7 @@ func (c *cmdNetworkACLUnset) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Unset network ACL configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network ACL property")) return cmd } @@ -466,6 +503,8 @@ func (c *cmdNetworkACLUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.networkACLSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkACLSet.Run(cmd, args) } From 9a91af0a7636c016182266a7ae24b53b778d6b2a Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:39:34 +0200 Subject: [PATCH 262/543] lxc: Update `get/set/unset` command for `network forward` properties Signed-off-by: Gabriel Mougard --- lxc/network_forward.go | 53 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 7 deletions(-) diff --git a/lxc/network_forward.go b/lxc/network_forward.go index 5f8b9adc201c..9b6c8233b8e6 100644 --- a/lxc/network_forward.go +++ b/lxc/network_forward.go @@ -318,6 +318,8 @@ func (c *cmdNetworkForwardCreate) Run(cmd *cobra.Command, args []string) error { type cmdNetworkForwardGet struct { global *cmdGlobal networkForward 
*cmdNetworkForward + + flagIsProperty bool } func (c *cmdNetworkForwardGet) Command() *cobra.Command { @@ -325,6 +327,8 @@ func (c *cmdNetworkForwardGet) Command() *cobra.Command { cmd.Use = usage("get", i18n.G("[:] ")) cmd.Short = i18n.G("Get values for network forward configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Get values for network forward configuration keys")) + + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network forward property")) cmd.RunE = c.Run return cmd @@ -360,9 +364,19 @@ func (c *cmdNetworkForwardGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range forward.Config { - if k == args[2] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := forward.Writable() + res, err := getFieldByJsonTag(&w, args[2]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network forward %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range forward.Config { + if k == args[2] { + fmt.Printf("%s\n", v) + } } } @@ -373,6 +387,8 @@ func (c *cmdNetworkForwardGet) Run(cmd *cobra.Command, args []string) error { type cmdNetworkForwardSet struct { global *cmdGlobal networkForward *cmdNetworkForward + + flagIsProperty bool } func (c *cmdNetworkForwardSet) Command() *cobra.Command { @@ -386,6 +402,7 @@ For backward compatibility, a single configuration key may still be set with: lxc network set [:] `)) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network forward property")) cmd.Flags().StringVar(&c.networkForward.flagTarget, "target", "", i18n.G("Cluster member name")+"``") return cmd @@ -437,13 +454,30 @@ func (c *cmdNetworkForwardSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - forward.Config[k] = v + writable := forward.Writable() + if c.flagIsProperty { + if cmd.Name() == 
"unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - forward.Normalise() + writable.Normalise() - return client.UpdateNetworkForward(resource.name, forward.ListenAddress, forward.Writable(), etag) + return client.UpdateNetworkForward(resource.name, forward.ListenAddress, writable, etag) } // Unset. @@ -451,6 +485,8 @@ type cmdNetworkForwardUnset struct { global *cmdGlobal networkForward *cmdNetworkForward networkForwardSet *cmdNetworkForwardSet + + flagIsProperty bool } func (c *cmdNetworkForwardUnset) Command() *cobra.Command { @@ -460,6 +496,7 @@ func (c *cmdNetworkForwardUnset) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Unset network forward keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network forward property")) return cmd } @@ -470,6 +507,8 @@ func (c *cmdNetworkForwardUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.networkForwardSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkForwardSet.Run(cmd, args) } From 354a9b3426e5632fd963e7661c3219eeb626b475 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:40:43 +0200 Subject: [PATCH 263/543] lxc: Update `get/set/unset` command for `network zone` properties Signed-off-by: Gabriel Mougard --- lxc/network_zone.go | 101 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 89 insertions(+), 12 deletions(-) diff --git a/lxc/network_zone.go b/lxc/network_zone.go index 9b603a646c23..0e7207ffea7d 100644 --- a/lxc/network_zone.go +++ b/lxc/network_zone.go @@ -200,6 +200,8 @@ func (c 
*cmdNetworkZoneShow) Run(cmd *cobra.Command, args []string) error { type cmdNetworkZoneGet struct { global *cmdGlobal networkZone *cmdNetworkZone + + flagIsProperty bool } func (c *cmdNetworkZoneGet) Command() *cobra.Command { @@ -209,6 +211,7 @@ func (c *cmdNetworkZoneGet) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Get values for network zone configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network zone property")) return cmd } @@ -236,9 +239,19 @@ func (c *cmdNetworkZoneGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == args[1] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network zone %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range resp.Config { + if k == args[1] { + fmt.Printf("%s\n", v) + } } } @@ -330,6 +343,8 @@ func (c *cmdNetworkZoneCreate) Run(cmd *cobra.Command, args []string) error { type cmdNetworkZoneSet struct { global *cmdGlobal networkZone *cmdNetworkZone + + flagIsProperty bool } func (c *cmdNetworkZoneSet) Command() *cobra.Command { @@ -343,6 +358,7 @@ For backward compatibility, a single configuration key may still be set with: lxc network set [:] `)) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network zone property")) return cmd } @@ -378,11 +394,28 @@ func (c *cmdNetworkZoneSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - netZone.Config[k] = v + writable := netZone.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error 
unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateNetworkZone(resource.name, netZone.Writable(), etag) + return resource.server.UpdateNetworkZone(resource.name, writable, etag) } // Unset. @@ -390,6 +423,8 @@ type cmdNetworkZoneUnset struct { global *cmdGlobal networkZone *cmdNetworkZone networkZoneSet *cmdNetworkZoneSet + + flagIsProperty bool } func (c *cmdNetworkZoneUnset) Command() *cobra.Command { @@ -399,6 +434,8 @@ func (c *cmdNetworkZoneUnset) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Unset network zone configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network zone property")) + return cmd } @@ -409,6 +446,8 @@ func (c *cmdNetworkZoneUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.networkZoneSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkZoneSet.Run(cmd, args) } @@ -761,6 +800,8 @@ func (c *cmdNetworkZoneRecordShow) Run(cmd *cobra.Command, args []string) error type cmdNetworkZoneRecordGet struct { global *cmdGlobal networkZoneRecord *cmdNetworkZoneRecord + + flagIsProperty bool } func (c *cmdNetworkZoneRecordGet) Command() *cobra.Command { @@ -770,6 +811,7 @@ func (c *cmdNetworkZoneRecordGet) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Get values for network zone record configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network zone record property")) return cmd } @@ -796,9 +838,19 @@ func (c *cmdNetworkZoneRecordGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == 
args[2] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[2]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network zone record %q: %v"), args[2], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range resp.Config { + if k == args[2] { + fmt.Printf("%s\n", v) + } } } @@ -889,6 +941,8 @@ func (c *cmdNetworkZoneRecordCreate) Run(cmd *cobra.Command, args []string) erro type cmdNetworkZoneRecordSet struct { global *cmdGlobal networkZoneRecord *cmdNetworkZoneRecord + + flagIsProperty bool } func (c *cmdNetworkZoneRecordSet) Command() *cobra.Command { @@ -900,6 +954,7 @@ func (c *cmdNetworkZoneRecordSet) Command() *cobra.Command { cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network zone record property")) return cmd } @@ -933,11 +988,28 @@ func (c *cmdNetworkZoneRecordSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - netRecord.Config[k] = v + writable := netRecord.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateNetworkZoneRecord(resource.name, args[1], netRecord.Writable(), etag) + return resource.server.UpdateNetworkZoneRecord(resource.name, args[1], writable, etag) } // Unset. 
@@ -945,6 +1017,8 @@ type cmdNetworkZoneRecordUnset struct { global *cmdGlobal networkZoneRecord *cmdNetworkZoneRecord networkZoneRecordSet *cmdNetworkZoneRecordSet + + flagIsProperty bool } func (c *cmdNetworkZoneRecordUnset) Command() *cobra.Command { @@ -954,6 +1028,7 @@ func (c *cmdNetworkZoneRecordUnset) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Unset network zone record configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network zone record property")) return cmd } @@ -964,6 +1039,8 @@ func (c *cmdNetworkZoneRecordUnset) Run(cmd *cobra.Command, args []string) error return err } + c.networkZoneRecordSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkZoneRecordSet.Run(cmd, args) } From de409350ed7348077186fd0306068936f9406a74 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:40:23 +0200 Subject: [PATCH 264/543] lxc: Update `get/set/unset` command for `network peer` properties Signed-off-by: Gabriel Mougard --- lxc/network_peer.go | 50 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/lxc/network_peer.go b/lxc/network_peer.go index 16b07cfb4a26..21dea50794e9 100644 --- a/lxc/network_peer.go +++ b/lxc/network_peer.go @@ -320,6 +320,8 @@ func (c *cmdNetworkPeerCreate) Run(cmd *cobra.Command, args []string) error { type cmdNetworkPeerGet struct { global *cmdGlobal networkPeer *cmdNetworkPeer + + flagIsProperty bool } func (c *cmdNetworkPeerGet) Command() *cobra.Command { @@ -329,6 +331,7 @@ func (c *cmdNetworkPeerGet) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Get values for network peer configuration keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network peer property")) return cmd } @@ -362,9 +365,19 @@ func (c 
*cmdNetworkPeerGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range peer.Config { - if k == args[2] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := peer.Writable() + res, err := getFieldByJsonTag(&w, args[2]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network peer %q: %v"), args[2], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range peer.Config { + if k == args[2] { + fmt.Printf("%s\n", v) + } } } @@ -375,6 +388,8 @@ func (c *cmdNetworkPeerGet) Run(cmd *cobra.Command, args []string) error { type cmdNetworkPeerSet struct { global *cmdGlobal networkPeer *cmdNetworkPeer + + flagIsProperty bool } func (c *cmdNetworkPeerSet) Command() *cobra.Command { @@ -388,6 +403,7 @@ For backward compatibility, a single configuration key may still be set with: lxc network set [:] `)) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network peer property")) return cmd } @@ -432,11 +448,28 @@ func (c *cmdNetworkPeerSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - peer.Config[k] = v + writable := peer.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return client.UpdateNetworkPeer(resource.name, peer.Name, peer.Writable(), etag) + return client.UpdateNetworkPeer(resource.name, peer.Name, writable, etag) } // Unset. 
@@ -444,6 +477,8 @@ type cmdNetworkPeerUnset struct { global *cmdGlobal networkPeer *cmdNetworkPeer networkPeerSet *cmdNetworkPeerSet + + flagIsProperty bool } func (c *cmdNetworkPeerUnset) Command() *cobra.Command { @@ -453,6 +488,7 @@ func (c *cmdNetworkPeerUnset) Command() *cobra.Command { cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Unset network peer keys")) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network peer property")) return cmd } @@ -463,6 +499,8 @@ func (c *cmdNetworkPeerUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.networkPeerSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkPeerSet.Run(cmd, args) } From ab940234fbf952359644f4572339b89575f5582b Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:41:05 +0200 Subject: [PATCH 265/543] lxc: Update `get/set/unset` command for `network` properties Signed-off-by: Gabriel Mougard --- lxc/network.go | 50 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/lxc/network.go b/lxc/network.go index 36b291002c45..4f5f6e4d43a7 100644 --- a/lxc/network.go +++ b/lxc/network.go @@ -701,6 +701,8 @@ func (c *cmdNetworkEdit) Run(cmd *cobra.Command, args []string) error { type cmdNetworkGet struct { global *cmdGlobal network *cmdNetwork + + flagIsProperty bool } func (c *cmdNetworkGet) Command() *cobra.Command { @@ -711,6 +713,7 @@ func (c *cmdNetworkGet) Command() *cobra.Command { `Get values for network configuration keys`)) cmd.Flags().StringVar(&c.network.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a network property")) cmd.RunE = c.Run return cmd @@ -746,9 +749,19 @@ func (c *cmdNetworkGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == 
args[1] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the network %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + for k, v := range resp.Config { + if k == args[1] { + fmt.Printf("%s\n", v) + } } } @@ -1096,6 +1109,8 @@ func (c *cmdNetworkRename) Run(cmd *cobra.Command, args []string) error { type cmdNetworkSet struct { global *cmdGlobal network *cmdNetwork + + flagIsProperty bool } func (c *cmdNetworkSet) Command() *cobra.Command { @@ -1109,6 +1124,7 @@ For backward compatibility, a single configuration key may still be set with: lxc network set [:] `)) cmd.Flags().StringVar(&c.network.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a network property")) cmd.RunE = c.Run return cmd @@ -1155,11 +1171,28 @@ func (c *cmdNetworkSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - network.Config[k] = v + writable := network.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return client.UpdateNetwork(resource.name, network.Writable(), etag) + return client.UpdateNetwork(resource.name, writable, etag) } // Show. 
@@ -1228,6 +1261,8 @@ type cmdNetworkUnset struct { global *cmdGlobal network *cmdNetwork networkSet *cmdNetworkSet + + flagIsProperty bool } func (c *cmdNetworkUnset) Command() *cobra.Command { @@ -1238,6 +1273,7 @@ func (c *cmdNetworkUnset) Command() *cobra.Command { `Unset network configuration keys`)) cmd.Flags().StringVar(&c.network.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a network property")) cmd.RunE = c.Run return cmd @@ -1250,6 +1286,8 @@ func (c *cmdNetworkUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.networkSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.networkSet.Run(cmd, args) } From 3d1406489c092d3c7252ed253453b71b91ff30df Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:41:22 +0200 Subject: [PATCH 266/543] lxc: Update `get/set/unset` command for `profile` properties Signed-off-by: Gabriel Mougard --- lxc/profile.go | 48 +++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/lxc/profile.go b/lxc/profile.go index b3d899bac728..6d5bb219d183 100644 --- a/lxc/profile.go +++ b/lxc/profile.go @@ -541,6 +541,8 @@ func (c *cmdProfileEdit) Run(cmd *cobra.Command, args []string) error { type cmdProfileGet struct { global *cmdGlobal profile *cmdProfile + + flagIsProperty bool } func (c *cmdProfileGet) Command() *cobra.Command { @@ -552,6 +554,7 @@ func (c *cmdProfileGet) Command() *cobra.Command { cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a profile property")) return cmd } @@ -580,7 +583,18 @@ func (c *cmdProfileGet) Run(cmd *cobra.Command, args []string) error { return err } - fmt.Printf("%s\n", profile.Config[args[1]]) + if c.flagIsProperty { + w := profile.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return 
fmt.Errorf(i18n.G("The property %q does not exist on the profile %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + fmt.Printf("%s\n", profile.Config[args[1]]) + } + return nil } @@ -777,6 +791,8 @@ func (c *cmdProfileRename) Run(cmd *cobra.Command, args []string) error { type cmdProfileSet struct { global *cmdGlobal profile *cmdProfile + + flagIsProperty bool } func (c *cmdProfileSet) Command() *cobra.Command { @@ -790,7 +806,7 @@ For backward compatibility, a single configuration key may still be set with: lxc profile set [:] `)) cmd.RunE = c.Run - + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a profile property")) return cmd } @@ -825,11 +841,28 @@ func (c *cmdProfileSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - profile.Config[k] = v + writable := profile.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateProfile(resource.name, profile.Writable(), etag) + return resource.server.UpdateProfile(resource.name, writable, etag) } // Show. 
@@ -890,6 +923,8 @@ type cmdProfileUnset struct { global *cmdGlobal profile *cmdProfile profileSet *cmdProfileSet + + flagIsProperty bool } func (c *cmdProfileUnset) Command() *cobra.Command { @@ -900,6 +935,7 @@ func (c *cmdProfileUnset) Command() *cobra.Command { `Unset profile configuration keys`)) cmd.RunE = c.Run + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a profile property")) return cmd } @@ -911,6 +947,8 @@ func (c *cmdProfileUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.profileSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.profileSet.Run(cmd, args) } From 0cae43b7107503c9206f480296d9192da539f74f Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:41:38 +0200 Subject: [PATCH 267/543] lxc: Update `get/set/unset` command for `project` properties Signed-off-by: Gabriel Mougard --- lxc/project.go | 50 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/lxc/project.go b/lxc/project.go index 3cae1821b7d4..d6454263283f 100644 --- a/lxc/project.go +++ b/lxc/project.go @@ -337,6 +337,8 @@ func (c *cmdProjectEdit) Run(cmd *cobra.Command, args []string) error { type cmdProjectGet struct { global *cmdGlobal project *cmdProject + + flagIsProperty bool } func (c *cmdProjectGet) Command() *cobra.Command { @@ -347,7 +349,7 @@ func (c *cmdProjectGet) Command() *cobra.Command { `Get values for project configuration keys`)) cmd.RunE = c.Run - + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a project property")) return cmd } @@ -376,7 +378,18 @@ func (c *cmdProjectGet) Run(cmd *cobra.Command, args []string) error { return err } - fmt.Printf("%s\n", project.Config[args[1]]) + if c.flagIsProperty { + w := project.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the project %q: 
%v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + fmt.Printf("%s\n", project.Config[args[1]]) + } + return nil } @@ -543,6 +556,8 @@ func (c *cmdProjectRename) Run(cmd *cobra.Command, args []string) error { type cmdProjectSet struct { global *cmdGlobal project *cmdProject + + flagIsProperty bool } func (c *cmdProjectSet) Command() *cobra.Command { @@ -556,7 +571,7 @@ For backward compatibility, a single configuration key may still be set with: lxc project set [:] `)) cmd.RunE = c.Run - + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a project property")) return cmd } @@ -591,11 +606,28 @@ func (c *cmdProjectSet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range keys { - project.Config[k] = v + writable := project.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + for k, v := range keys { + writable.Config[k] = v + } } - return resource.server.UpdateProject(resource.name, project.Writable(), etag) + return resource.server.UpdateProject(resource.name, writable, etag) } // Unset. 
@@ -603,6 +635,8 @@ type cmdProjectUnset struct { global *cmdGlobal project *cmdProject projectSet *cmdProjectSet + + flagIsProperty bool } func (c *cmdProjectUnset) Command() *cobra.Command { @@ -613,7 +647,7 @@ func (c *cmdProjectUnset) Command() *cobra.Command { `Unset project configuration keys`)) cmd.RunE = c.Run - + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a project property")) return cmd } @@ -624,6 +658,8 @@ func (c *cmdProjectUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.projectSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.projectSet.Run(cmd, args) } From b5791792fd126a84675ef07dc03288ce6c34c443 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:42:16 +0200 Subject: [PATCH 268/543] lxc: Update `get/set/unset` command for `storage volume` properties Signed-off-by: Gabriel Mougard --- lxc/storage_volume.go | 94 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 83 insertions(+), 11 deletions(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index cf4b3f5c251d..c7ce8d8feb02 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1045,6 +1045,8 @@ type cmdStorageVolumeGet struct { global *cmdGlobal storage *cmdStorage storageVolume *cmdStorageVolume + + flagIsProperty bool } func (c *cmdStorageVolumeGet) Command() *cobra.Command { @@ -1066,6 +1068,7 @@ lxc storage volume get default virtual-machine/data snapshots.expiry Returns the snapshot expiration period for a virtual machine "data" in pool "default".`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a storage volume property")) cmd.RunE = c.Run return cmd @@ -1109,15 +1112,22 @@ func (c *cmdStorageVolumeGet) Run(cmd *cobra.Command, args []string) error { } if isSnapshot { - // Get the storage volume snapshot entry resp, 
_, err := client.GetStoragePoolVolumeSnapshot(resource.name, volType, fields[0], fields[1]) if err != nil { return err } - for k, v := range resp.Config { - if k == args[2] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + res, err := getFieldByJsonTag(resp, args[2]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the storage pool volume snapshot %s/%s: %v"), args[2], fields[0], fields[1], err) + } + + fmt.Printf("%v\n", res) + } else { + v, ok := resp.Config[args[2]] + if ok { + fmt.Println(v) } } @@ -1130,9 +1140,17 @@ func (c *cmdStorageVolumeGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == args[2] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + res, err := getFieldByJsonTag(resp, args[2]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the storage pool volume %q: %v"), args[2], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + v, ok := resp.Config[args[2]] + if ok { + fmt.Println(v) } } @@ -1725,6 +1743,8 @@ type cmdStorageVolumeSet struct { global *cmdGlobal storage *cmdStorage storageVolume *cmdStorageVolume + + flagIsProperty bool } func (c *cmdStorageVolumeSet) Command() *cobra.Command { @@ -1747,6 +1767,7 @@ lxc storage volume set default virtual-machine/data snapshots.expiry=7d Sets the snapshot expiration period for a virtual machine "data" in pool "default" to seven days.`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a storage volume property")) cmd.RunE = c.Run return cmd @@ -1790,6 +1811,35 @@ func (c *cmdStorageVolumeSet) Run(cmd *cobra.Command, args []string) error { } if isSnapshot { + if c.flagIsProperty { + snapVol, etag, err := client.GetStoragePoolVolumeSnapshot(resource.name, volType, fields[0], fields[1]) + if err != nil { + return err + } + + writable := 
snapVol.Writable() + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + + err = client.UpdateStoragePoolVolumeSnapshot(resource.name, volType, fields[0], fields[1], writable, etag) + if err != nil { + return err + } + + return nil + } + return fmt.Errorf(i18n.G("Snapshots are read-only and can't have their configuration changed")) } @@ -1804,12 +1854,29 @@ func (c *cmdStorageVolumeSet) Run(cmd *cobra.Command, args []string) error { return err } - // Update the volume. - for k, v := range keys { - vol.Config[k] = v + writable := vol.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + // Update the volume config keys. 
+ for k, v := range keys { + writable.Config[k] = v + } } - err = client.UpdateStoragePoolVolume(resource.name, vol.Type, vol.Name, vol.Writable(), etag) + err = client.UpdateStoragePoolVolume(resource.name, vol.Type, vol.Name, writable, etag) if err != nil { return err } @@ -1929,6 +1996,8 @@ type cmdStorageVolumeUnset struct { storage *cmdStorage storageVolume *cmdStorageVolume storageVolumeSet *cmdStorageVolumeSet + + flagIsProperty bool } func (c *cmdStorageVolumeUnset) Command() *cobra.Command { @@ -1948,6 +2017,7 @@ lxc storage volume unset default virtual-machine/data snapshots.expiry Removes the snapshot expiration period for a virtual machine "data" in pool "default".`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a storage volume property")) cmd.RunE = c.Run return cmd @@ -1960,6 +2030,8 @@ func (c *cmdStorageVolumeUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.storageVolumeSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.storageVolumeSet.Run(cmd, args) } From 2a23c4843931b2643d850885098bfe0c217d0388 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 16:42:38 +0200 Subject: [PATCH 269/543] lxc: Update `get/set/unset` command for `storage` properties Signed-off-by: Gabriel Mougard --- lxc/storage.go | 51 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/lxc/storage.go b/lxc/storage.go index 5e57ee81290d..56f1408f1bbf 100644 --- a/lxc/storage.go +++ b/lxc/storage.go @@ -329,6 +329,8 @@ func (c *cmdStorageEdit) Run(cmd *cobra.Command, args []string) error { type cmdStorageGet struct { global *cmdGlobal storage *cmdStorage + + flagIsProperty bool } func (c *cmdStorageGet) Command() *cobra.Command { @@ -339,6 +341,7 @@ func (c *cmdStorageGet) Command() *cobra.Command { `Get values for storage 
pool configuration keys`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a storage property")) cmd.RunE = c.Run return cmd @@ -374,9 +377,18 @@ func (c *cmdStorageGet) Run(cmd *cobra.Command, args []string) error { return err } - for k, v := range resp.Config { - if k == args[1] { - fmt.Printf("%s\n", v) + if c.flagIsProperty { + w := resp.Writable() + res, err := getFieldByJsonTag(&w, args[1]) + if err != nil { + return fmt.Errorf(i18n.G("The property %q does not exist on the storage pool %q: %v"), args[1], resource.name, err) + } + + fmt.Printf("%v\n", res) + } else { + v, ok := resp.Config[args[1]] + if ok { + fmt.Println(v) } } @@ -638,6 +650,8 @@ func (c *cmdStorageList) Run(cmd *cobra.Command, args []string) error { type cmdStorageSet struct { global *cmdGlobal storage *cmdStorage + + flagIsProperty bool } func (c *cmdStorageSet) Command() *cobra.Command { @@ -651,6 +665,7 @@ For backward compatibility, a single configuration key may still be set with: lxc storage set [:] `)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a storage property")) cmd.RunE = c.Run return cmd @@ -693,12 +708,29 @@ func (c *cmdStorageSet) Run(cmd *cobra.Command, args []string) error { return err } - // Update the pool - for k, v := range keys { - pool.Config[k] = v + writable := pool.Writable() + if c.flagIsProperty { + if cmd.Name() == "unset" { + for k := range keys { + err := unsetFieldByJsonTag(&writable, k) + if err != nil { + return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) + } + } + } else { + err := unpackKVToWritable(&writable, keys) + if err != nil { + return fmt.Errorf(i18n.G("Error setting properties: %v"), err) + } + } + } else { + // Update the volume config keys. 
+ for k, v := range keys { + writable.Config[k] = v + } } - err = client.UpdateStoragePool(resource.name, pool.Writable(), etag) + err = client.UpdateStoragePool(resource.name, writable, etag) if err != nil { return err } @@ -796,6 +828,8 @@ type cmdStorageUnset struct { global *cmdGlobal storage *cmdStorage storageSet *cmdStorageSet + + flagIsProperty bool } func (c *cmdStorageUnset) Command() *cobra.Command { @@ -806,6 +840,7 @@ func (c *cmdStorageUnset) Command() *cobra.Command { `Unset storage pool configuration keys`)) cmd.Flags().StringVar(&c.storage.flagTarget, "target", "", i18n.G("Cluster member name")+"``") + cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a storage property")) cmd.RunE = c.Run return cmd @@ -818,6 +853,8 @@ func (c *cmdStorageUnset) Run(cmd *cobra.Command, args []string) error { return err } + c.storageSet.flagIsProperty = c.flagIsProperty + args = append(args, "") return c.storageSet.Run(cmd, args) } From e8c7466f30f1d8b13c4e76f7a2260452bce9a584 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 13 Jul 2023 09:58:34 +0200 Subject: [PATCH 270/543] zfs: Add lxd:content_type user property Adding the `lxd:content_type` user property allows distinguishing between regular volumes, block_mode enabled volumes, and ISO volumes. Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_volumes.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index 781e83d61aca..d419afdf0d85 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -174,6 +174,11 @@ func (d *zfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper opts = []string{"volmode=none"} } + // Add custom property lxd:content_type which allows distinguishing between regular volumes, block_mode enabled volumes, and ISO volumes. 
+ if vol.volType == VolumeTypeCustom { + opts = append(opts, fmt.Sprintf("lxd:content_type=%s", vol.contentType)) + } + // Avoid double caching in the ARC cache and in the guest OS filesystem cache. if vol.volType == VolumeTypeVM { opts = append(opts, "primarycache=metadata", "secondarycache=metadata") From 421d97aebee01a6f33b04d3ac909ab6ed4657e31 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 13 Jul 2023 11:38:51 +0200 Subject: [PATCH 271/543] zfs: Get content type from lxd:content_type property When listing volumes, get the correct content type from the lxd:content_type property. This allows proper detection of block-mode enabled zvols. Signed-off-by: Thomas Hipp --- lxd/storage/drivers/driver_zfs_volumes.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go index d419afdf0d85..3772186d0d8b 100644 --- a/lxd/storage/drivers/driver_zfs_volumes.go +++ b/lxd/storage/drivers/driver_zfs_volumes.go @@ -1802,7 +1802,7 @@ func (d *zfs) ListVolumes() ([]Volume, error) { // However for custom block volumes it does not also end the volume name in zfsBlockVolSuffix (unlike the // LVM and Ceph drivers), so we must also retrieve the dataset type here and look for "volume" types // which also indicate this is a block volume. - cmd := exec.Command("zfs", "list", "-H", "-o", "name,type", "-r", "-t", "filesystem,volume", d.config["zfs.pool_name"]) + cmd := exec.Command("zfs", "list", "-H", "-o", "name,type,lxd:content_type", "-r", "-t", "filesystem,volume", d.config["zfs.pool_name"]) stdout, err := cmd.StdoutPipe() if err != nil { return nil, err @@ -1824,12 +1824,13 @@ func (d *zfs) ListVolumes() ([]Volume, error) { // Splitting fields on tab should be safe as ZFS doesn't appear to allow tabs in dataset names. 
parts := strings.Split(line, "\t") - if len(parts) != 2 { + if len(parts) != 3 { return nil, fmt.Errorf("Unexpected volume line %q", line) } zfsVolName := parts[0] zfsContentType := parts[1] + lxdContentType := parts[2] var volType VolumeType var volName string @@ -1875,7 +1876,14 @@ func (d *zfs) ListVolumes() ([]Volume, error) { v := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config) if isBlock { - v.SetMountFilesystemProbe(true) + // Get correct content type from lxd:content_type property. + if lxdContentType != "-" { + v.contentType = ContentType(lxdContentType) + } + + if v.contentType == ContentTypeBlock { + v.SetMountFilesystemProbe(true) + } } vols[volName] = v From 710aea4bb5948c6b7906c68132fc7edb5e297ebb Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 13 Jul 2023 12:12:32 +0200 Subject: [PATCH 272/543] patches: Add patchZfsSetContentTypeUserProperty Ths adds the `lxd:content_type` user property to custom storage volumes. In case of recovery, this allows for proper detection of block-mode enabled volumes. 
Signed-off-by: Thomas Hipp --- lxd/patches.go | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/lxd/patches.go b/lxd/patches.go index 6b7ea8981f20..c67c0c0b56b6 100644 --- a/lxd/patches.go +++ b/lxd/patches.go @@ -74,6 +74,7 @@ var patches = []patch{ {name: "storage_delete_old_snapshot_records", stage: patchPostDaemonStorage, run: patchGenericStorage}, {name: "storage_zfs_drop_block_volume_filesystem_extension", stage: patchPostDaemonStorage, run: patchGenericStorage}, {name: "storage_move_custom_iso_block_volumes", stage: patchPostDaemonStorage, run: patchStorageRenameCustomISOBlockVolumes}, + {name: "zfs_set_content_type_user_property", stage: patchPostDaemonStorage, run: patchZfsSetContentTypeUserProperty}, } type patch struct { @@ -839,4 +840,73 @@ func patchStorageRenameCustomISOBlockVolumes(name string, d *Daemon) error { return nil } +// patchZfsSetContentTypeUserProperty adds the `lxd:content_type` user property to custom storage volumes. In case of recovery, this allows for proper detection of block-mode enabled volumes. +func patchZfsSetContentTypeUserProperty(name string, d *Daemon) error { + s := d.State() + + // Get all storage pool names. + pools, err := s.DB.Cluster.GetStoragePoolNames() + if err != nil { + return fmt.Errorf("Failed getting storage pool names: %w", err) + } + + volTypeCustom := db.StoragePoolVolumeTypeCustom + customPoolVolumes := make(map[string][]*db.StorageVolume, 0) + + err = s.DB.Cluster.Transaction(s.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { + for _, pool := range pools { + // Get storage pool ID. + poolID, err := tx.GetStoragePoolID(ctx, pool) + if err != nil { + return fmt.Errorf("Failed getting storage pool ID of pool %q: %w", pool, err) + } + + // Get the pool's custom storage volumes. 
+ customVolumes, err := tx.GetStoragePoolVolumes(ctx, poolID, false, db.StorageVolumeFilter{Type: &volTypeCustom}) + if err != nil { + return fmt.Errorf("Failed getting custom storage volumes of pool %q: %w", pool, err) + } + + if customPoolVolumes[pool] == nil { + customPoolVolumes[pool] = []*db.StorageVolume{} + } + + customPoolVolumes[pool] = append(customPoolVolumes[pool], customVolumes...) + } + + return nil + }) + if err != nil { + return err + } + + for poolName, volumes := range customPoolVolumes { + // Load storage pool. + p, err := storagePools.LoadByName(s, poolName) + if err != nil { + return fmt.Errorf("Failed loading pool %q: %w", poolName, err) + } + + if p.Driver().Info().Name != "zfs" { + continue + } + + for _, vol := range volumes { + zfsPoolName := p.Driver().Config()["zfs.pool_name"] + if zfsPoolName != "" { + poolName = zfsPoolName + } + + zfsVolName := fmt.Sprintf("%s/%s/%s", poolName, storageDrivers.VolumeTypeCustom, project.StorageVolume(vol.Project, vol.Name)) + + _, err = shared.RunCommand("zfs", "set", fmt.Sprintf("lxd:content_type=%s", vol.ContentType), zfsVolName) + if err != nil { + logger.Debug("Failed setting lxd:content_type property", logger.Ctx{"name": zfsVolName, "err": err}) + } + } + } + + return nil +} + // Patches end here From ed4f60b64c016a77e80394d06f8d8aad4018c532 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Thu, 13 Jul 2023 15:19:13 +0200 Subject: [PATCH 273/543] test: Recover zfs block_mode custom volumes Signed-off-by: Thomas Hipp --- test/suites/backup.sh | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/test/suites/backup.sh b/test/suites/backup.sh index 8a339c6f775e..55fe16b2c945 100644 --- a/test/suites/backup.sh +++ b/test/suites/backup.sh @@ -4,6 +4,7 @@ test_storage_volume_recover() { spawn_lxd "${LXD_IMPORT_DIR}" true poolName=$(lxc profile device get default root pool) + poolDriver=$(lxc storage show "${poolName}" | awk '/^driver:/ {print $2}') # Create custom block volume. 
lxc storage volume create "${poolName}" vol1 --type=block @@ -14,6 +15,22 @@ test_storage_volume_recover() { # Ensure the custom block volume is no longer listed. ! lxc storage volume show "${poolName}" vol1 || false + if [ "$poolDriver" = "zfs" ]; then + # Create filesystem volume. + lxc storage volume create "${poolName}" vol3 + + # Create block_mode enabled volume. + lxc storage volume create "${poolName}" vol4 zfs.block_mode=true size=200MiB + + # Delete database entries of the created custom volumes. + lxd sql global "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='vol3'" + lxd sql global "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='vol4'" + + # Ensure the custom volumes are no longer listed. + ! lxc storage volume show "${poolName}" vol3 || false + ! lxc storage volume show "${poolName}" vol4 || false + fi + # Recover custom block volume. cat < Date: Tue, 18 Jul 2023 11:05:34 +0200 Subject: [PATCH 274/543] lxd/network/common: Prevent unspecified network forward address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using either 0.0.0.0 or :: for the forwards listen address is not supported. Signed-off-by: Julian Pelizäus --- lxd/network/driver_common.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lxd/network/driver_common.go b/lxd/network/driver_common.go index a2e96f97b584..2954f43221c2 100644 --- a/lxd/network/driver_common.go +++ b/lxd/network/driver_common.go @@ -762,6 +762,10 @@ func (n *common) forwardValidate(listenAddress net.IP, forward *api.NetworkForwa return nil, fmt.Errorf("Invalid listen address") } + if listenAddress.IsUnspecified() { + return nil, fmt.Errorf("Cannot use unspecified address: %q", listenAddress.String()) + } + listenIsIP4 := listenAddress.To4() != nil // For checking target addresses are within network's subnet. 
From 9282f2bf74abcdcbb8d2df7f03411ef45603da57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 18 Jul 2023 11:09:39 +0200 Subject: [PATCH 275/543] tests: Check unspecified network forward addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- test/suites/network_forward.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/suites/network_forward.sh b/test/suites/network_forward.sh index a1d6e1fec2ab..c983b39297be 100644 --- a/test/suites/network_forward.sh +++ b/test/suites/network_forward.sh @@ -9,6 +9,12 @@ test_network_forward() { ipv4.address=192.0.2.1/24 \ ipv6.address=fd42:4242:4242:1010::1/64 + # Check creating a forward with an unspecified IPv4 address fails. + ! lxc network forward create "${netName}" 0.0.0.0 || false + + # Check creating a forward with an unspecified IPv6 address fails. + ! lxc network forward create "${netName}" :: || false + # Check creating empty forward doesn't create any firewall rules. lxc network forward create "${netName}" 198.51.100.1 if [ "$firewallDriver" = "xtables" ]; then From c6616bc213acbeffc8ced573bbde9638ecc166a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Mon, 17 Jul 2023 11:41:19 +0200 Subject: [PATCH 276/543] lxd/shared/instancewriter: Use right header key for tar ACLs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When creating an archive from a container, use the right header key for the go tar module. There is no reference to `SCHILY.acl.*` in the go codebase under src/archive/tar. 
Signed-off-by: Julian Pelizäus --- shared/instancewriter/instance_tar_writer.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/shared/instancewriter/instance_tar_writer.go b/shared/instancewriter/instance_tar_writer.go index 210fabbaa79a..38327a180234 100644 --- a/shared/instancewriter/instance_tar_writer.go +++ b/shared/instancewriter/instance_tar_writer.go @@ -117,7 +117,7 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI continue } - hdr.PAXRecords["SCHILY.acl.access"] = aclAccess + val = aclAccess } else if key == "system.posix_acl_default" && ctw.idmapSet != nil { aclDefault, err := idmap.UnshiftACL(val, ctw.idmapSet) if err != nil { @@ -125,7 +125,7 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI continue } - hdr.PAXRecords["SCHILY.acl.default"] = aclDefault + val = aclDefault } else if key == "security.capability" && ctw.idmapSet != nil { vfsCaps, err := idmap.UnshiftCaps(val, ctw.idmapSet) if err != nil { @@ -133,10 +133,10 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI continue } - hdr.PAXRecords["SCHILY.xattr."+key] = vfsCaps - } else { - hdr.PAXRecords["SCHILY.xattr."+key] = val + val = vfsCaps } + + hdr.PAXRecords["SCHILY.xattr."+key] = val } } From 335e1de51cdb91b955f4514090121ff3e22e5986 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Peliz=C3=A4us?= Date: Tue, 18 Jul 2023 09:38:05 +0200 Subject: [PATCH 277/543] tests: Add file ACL test for image publishing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Julian Pelizäus --- test/main.sh | 1 + test/suites/image_acl.sh | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 test/suites/image_acl.sh diff --git a/test/main.sh b/test/main.sh index 73cf588e0145..37d80071a24f 100755 --- a/test/main.sh +++ b/test/main.sh @@ -276,6 +276,7 @@ if [ "${1:-"all"}" != "cluster" ]; then run_test 
test_image_prefer_cached "image prefer cached" run_test test_image_import_dir "import image from directory" run_test test_image_refresh "image refresh" + run_test test_image_acl "image acl" run_test test_cloud_init "cloud-init" run_test test_exec "exec" run_test test_concurrent_exec "concurrent exec" diff --git a/test/suites/image_acl.sh b/test/suites/image_acl.sh new file mode 100644 index 000000000000..61f98bf502be --- /dev/null +++ b/test/suites/image_acl.sh @@ -0,0 +1,25 @@ +test_image_acl() { + ensure_import_testimage + + # Launch a new container with an ACL applied file + lxc launch testimage c1 + CONTAINER_PID="$(lxc query /1.0/instances/c1?recursion=1 | jq '.state.pid')" + lxc exec c1 touch foo + setfacl -m user:1000001:rwx "/proc/$CONTAINER_PID/root/root/foo" + setfacl -m group:1000001:rwx "/proc/$CONTAINER_PID/root/root/foo" + + # Publish the container to a new image + lxc stop c1 + lxc publish c1 --alias c1-with-acl + + # Launch a new container from the existing image + lxc launch c1-with-acl c2 + + # Check if the ACLs are still present + CONTAINER_PID="$(lxc query /1.0/instances/c2?recursion=1 | jq '.state.pid')" + getfacl "/proc/$CONTAINER_PID/root/root/foo" | grep -q "user:1000001:rwx" + getfacl "/proc/$CONTAINER_PID/root/root/foo" | grep -q "group:1000001:rwx" + + lxc delete -f c1 c2 + lxc image delete c1-with-acl +} From 47232154249511d53a6084837b290eb391aa7434 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Tue, 11 Jul 2023 18:57:33 +0200 Subject: [PATCH 278/543] test: get/set/unset of instance properties Signed-off-by: Gabriel Mougard --- test/main.sh | 1 + test/suites/config.sh | 55 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/test/main.sh b/test/main.sh index 37d80071a24f..a780e1a45dee 100755 --- a/test/main.sh +++ b/test/main.sh @@ -288,6 +288,7 @@ if [ "${1:-"all"}" != "cluster" ]; then run_test test_snap_volume_db_recovery "snapshot volume database record recovery" run_test 
test_config_profiles "profiles and configuration" run_test test_config_edit "container configuration edit" + run_test test_property "container property" run_test test_config_edit_container_snapshot_pool_config "container and snapshot volume configuration edit" run_test test_container_metadata "manage container metadata and templates" run_test test_container_snapshot_config "container snapshot configuration" diff --git a/test/suites/config.sh b/test/suites/config.sh index 8599e0ccfcef..1d75b7077b82 100644 --- a/test/suites/config.sh +++ b/test/suites/config.sh @@ -287,6 +287,61 @@ test_config_edit() { lxc delete foo } +test_property() { + ensure_import_testimage + + lxc init testimage foo -s "lxdtest-$(basename "${LXD_DIR}")" + + # Set a property of an instance + lxc config set foo description="a new description" --property + # Check that the property is set + lxc config show foo | grep -q "description: a new description" + + # Unset a property of an instance + lxc config unset foo description --property + # Check that the property is unset + ! 
lxc config show foo | grep -q "description: a new description" || false + + # Set a property of an instance (bool) + lxc config set foo ephemeral=true --property + # Check that the property is set + lxc config show foo | grep -q "ephemeral: true" + + # Unset a property of an instance (bool) + lxc config unset foo ephemeral --property + # Check that the property is unset (i.e false) + lxc config show foo | grep -q "ephemeral: false" + + # Create a snap of the instance to set its expiration timestamp + lxc snapshot foo s1 + lxc config set foo/s1 expires_at="2024-03-23T17:38:37.753398689-04:00" --property + lxc config show foo/s1 | grep -q "expires_at: 2024-03-23T17:38:37.753398689-04:00" + lxc config unset foo/s1 expires_at --property + lxc config show foo/s1 | grep -q "expires_at: 0001-01-01T00:00:00Z" + + + # Create a storage volume, create a volume snapshot and set its expiration timestamp + # shellcheck disable=2039,3043 + local storage_pool + storage_pool="lxdtest-$(basename "${LXD_DIR}")" + storage_volume="${storage_pool}-vol" + + lxc storage volume create "${storage_pool}" "${storage_volume}" + lxc launch testimage c1 -s "${storage_pool}" + + # This will create a snapshot named 'snap0' + lxc storage volume snapshot "${storage_pool}" "${storage_volume}" + + lxc storage volume set "${storage_pool}" "${storage_volume}"/snap0 expires_at="2024-03-23T17:38:37.753398689-04:00" --property + lxc storage volume show "${storage_pool}" "${storage_volume}/snap0" | grep 'expires_at: 2024-03-23T17:38:37.753398689-04:00' + lxc storage volume unset "${storage_pool}" "${storage_volume}"/snap0 expires_at --property + lxc storage volume show "${storage_pool}" "${storage_volume}/snap0" | grep 'expires_at: 0001-01-01T00:00:00Z' + + lxc delete -f c1 + lxc storage volume delete "${storage_pool}" "${storage_volume}" + lxc delete -f foo +} + test_config_edit_container_snapshot_pool_config() { # shellcheck disable=2034,2039,2155,3043 local storage_pool="lxdtest-$(basename "${LXD_DIR}")" 
From 93dd3bc13fa3d903dcc5a3b6d277047d2ff7f8b8 Mon Sep 17 00:00:00 2001 From: Gabriel Mougard Date: Fri, 14 Jul 2023 18:12:41 +0200 Subject: [PATCH 279/543] doc: Update the doc to describe how to get/set/unset instance properties Signed-off-by: Gabriel Mougard --- doc/explanation/instance_config.md | 2 +- doc/howto/instances_configure.md | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/explanation/instance_config.md b/doc/explanation/instance_config.md index b1a8664c25ab..ff795cf933d3 100644 --- a/doc/explanation/instance_config.md +++ b/doc/explanation/instance_config.md @@ -6,7 +6,7 @@ The instance configuration consists of different categories: Instance properties : Instance properties are specified when the instance is created. They include, for example, the instance name and architecture. - Some of the properties are read-only and cannot be changed after creation, while others can be updated when {ref}`editing the full instance configuration `. + Some of the properties are read-only and cannot be changed after creation, while others can be updated by {ref}`setting their property value ` or {ref}`editing the full instance configuration `. In the YAML configuration, properties are on the top level. diff --git a/doc/howto/instances_configure.md b/doc/howto/instances_configure.md index a1dc6eb00711..4d9ccdf0b223 100644 --- a/doc/howto/instances_configure.md +++ b/doc/howto/instances_configure.md @@ -32,6 +32,22 @@ Others are updated only when the instance is restarted. See the "Live update" column in the {ref}`instance-options` tables for information about which options are applied immediately while the instance is running. ``` +(instances-configure-properties)= +## Configure instance properties + +To update instance properties after the instance is created, use the `lxc config set` command with the `--property` flag. +Specify the instance name and the key and value of the instance property: + + lxc config set = = ... 
--property + +Using the same flag, you can also unset a property just like you would unset a configuration option: + + lxc config unset --property + +You can also retrieve a specific property value with: + + lxc config get --property + (instances-configure-devices)= ## Configure devices From 176bdaef43c22d40ac04a21567dc015b2fea674f Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 17 Jul 2023 17:38:49 +0200 Subject: [PATCH 280/543] lxc: Show storage volume info regardless of state This fixes an issue where no storage volume info would be displayed if state returned HTTP 500. Instead, the usage should be omitted. Signed-off-by: Thomas Hipp --- lxc/storage_volume.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lxc/storage_volume.go b/lxc/storage_volume.go index c7ce8d8feb02..21d963961219 100644 --- a/lxc/storage_volume.go +++ b/lxc/storage_volume.go @@ -1235,10 +1235,8 @@ func (c *cmdStorageVolumeInfo) Run(cmd *cobra.Command, args []string) error { return err } - volState, err := client.GetStoragePoolVolumeState(resource.name, volType, volName) - if err != nil { - return err - } + // Instead of failing here if the usage cannot be determined, it is just omitted. 
+ volState, _ := client.GetStoragePoolVolumeState(resource.name, volType, volName) volSnapshots, err := client.GetStoragePoolVolumeSnapshots(resource.name, volType, volName) if err != nil { @@ -1277,7 +1275,7 @@ func (c *cmdStorageVolumeInfo) Run(cmd *cobra.Command, args []string) error { fmt.Printf(i18n.G("Location: %s")+"\n", vol.Location) } - if volState.Usage != nil { + if volState != nil && volState.Usage != nil { fmt.Printf(i18n.G("Usage: %s")+"\n", units.GetByteSizeStringIEC(int64(volState.Usage.Used), 2)) if volState.Usage.Total > 0 { fmt.Printf(i18n.G("Total: %s")+"\n", units.GetByteSizeStringIEC(int64(volState.Usage.Total), 2)) From baaacd8eab5fc7a4dcf736331816f2269d756d15 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 18 Jul 2023 17:03:13 +0200 Subject: [PATCH 281/543] lxd/db: Move GetStoragePoolVolumeWithID to ClusterTx Signed-off-by: Thomas Hipp --- lxd/db/entity.go | 22 ++++++++++++++++++++-- lxd/db/storage_volumes.go | 9 +++------ 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/lxd/db/entity.go b/lxd/db/entity.go index b5d4a50febab..2b22a28aca73 100644 --- a/lxd/db/entity.go +++ b/lxd/db/entity.go @@ -285,7 +285,16 @@ func (c *Cluster) GetURIFromEntity(entityType int, entityID int) (string, error) uri = fmt.Sprintf(cluster.EntityURIs[entityType], pool.Name) case cluster.TypeStorageVolume: - args, err := c.GetStoragePoolVolumeWithID(entityID) + var args StorageVolumeArgs + + err := c.Transaction(c.closingCtx, func(ctx context.Context, tx *ClusterTx) error { + args, err = tx.GetStoragePoolVolumeWithID(ctx, entityID) + if err != nil { + return err + } + + return nil + }) if err != nil { return "", fmt.Errorf("Failed to get storage volume: %w", err) } @@ -297,7 +306,16 @@ func (c *Cluster) GetURIFromEntity(entityType int, entityID int) (string, error) return "", fmt.Errorf("Failed to get volume backup: %w", err) } - instance, err := c.GetStoragePoolVolumeWithID(int(backup.ID)) + var instance StorageVolumeArgs + + err = 
c.Transaction(c.closingCtx, func(ctx context.Context, tx *ClusterTx) error { + instance, err = tx.GetStoragePoolVolumeWithID(ctx, int(backup.ID)) + if err != nil { + return err + } + + return nil + }) if err != nil { return "", fmt.Errorf("Failed to get storage volume: %w", err) } diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go index 74adeff50b73..2dc7c2c99a2e 100644 --- a/lxd/db/storage_volumes.go +++ b/lxd/db/storage_volumes.go @@ -70,7 +70,7 @@ WHERE storage_volumes.type = ? } // GetStoragePoolVolumeWithID returns the volume with the given ID. -func (c *Cluster) GetStoragePoolVolumeWithID(volumeID int) (StorageVolumeArgs, error) { +func (c *ClusterTx) GetStoragePoolVolumeWithID(ctx context.Context, volumeID int) (StorageVolumeArgs, error) { var response StorageVolumeArgs stmt := ` @@ -81,10 +81,7 @@ JOIN projects ON projects.id = storage_volumes.project_id WHERE storage_volumes.id = ? ` - inargs := []any{volumeID} - outargs := []any{&response.ID, &response.Name, &response.Description, &response.PoolName, &response.Type, &response.ProjectName} - - err := dbQueryRowScan(c, stmt, inargs, outargs) + err := c.tx.QueryRowContext(ctx, stmt, volumeID).Scan(&response.ID, &response.Name, &response.Description, &response.PoolName, &response.Type, &response.ProjectName) if err != nil { if err == sql.ErrNoRows { return StorageVolumeArgs{}, api.StatusErrorf(http.StatusNotFound, "Storage pool volume not found") @@ -93,7 +90,7 @@ WHERE storage_volumes.id = ? 
return StorageVolumeArgs{}, err } - response.Config, err = c.storageVolumeConfigGet(response.ID, false) + response.Config, err = c.storageVolumeConfigGet(ctx, response.ID, false) if err != nil { return StorageVolumeArgs{}, err } From 138e0d70e0032ba4cff911b189d59cb662cc32b8 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Tue, 18 Jul 2023 17:08:49 +0200 Subject: [PATCH 282/543] lxd/db: Fix URI of TypeStorageVolumeBackup entities Signed-off-by: Thomas Hipp --- lxd/db/entity.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lxd/db/entity.go b/lxd/db/entity.go index 2b22a28aca73..07a015d8e4d1 100644 --- a/lxd/db/entity.go +++ b/lxd/db/entity.go @@ -306,10 +306,10 @@ func (c *Cluster) GetURIFromEntity(entityType int, entityID int) (string, error) return "", fmt.Errorf("Failed to get volume backup: %w", err) } - var instance StorageVolumeArgs + var volume StorageVolumeArgs err = c.Transaction(c.closingCtx, func(ctx context.Context, tx *ClusterTx) error { - instance, err = tx.GetStoragePoolVolumeWithID(ctx, int(backup.ID)) + volume, err = tx.GetStoragePoolVolumeWithID(ctx, int(backup.VolumeID)) if err != nil { return err } @@ -320,7 +320,7 @@ func (c *Cluster) GetURIFromEntity(entityType int, entityID int) (string, error) return "", fmt.Errorf("Failed to get storage volume: %w", err) } - uri = fmt.Sprintf(cluster.EntityURIs[entityType], instance.PoolName, instance.Name, backup.Name, instance.ProjectName) + uri = fmt.Sprintf(cluster.EntityURIs[entityType], volume.PoolName, volume.TypeName, volume.Name, backup.Name, volume.ProjectName) case cluster.TypeStorageVolumeSnapshot: snapshot, err := c.GetStorageVolumeSnapshotWithID(entityID) if err != nil { From a497ae41b9d39055d50601a4e090933f30f0d875 Mon Sep 17 00:00:00 2001 From: Thomas Hipp Date: Mon, 17 Jul 2023 15:19:01 +0200 Subject: [PATCH 283/543] lxd/db: Fix column name in storage volume query There correct column name is `storage_volumes.type`. 
Signed-off-by: Thomas Hipp --- lxd/db/storage_volumes.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go index 2dc7c2c99a2e..0dddc25c7eac 100644 --- a/lxd/db/storage_volumes.go +++ b/lxd/db/storage_volumes.go @@ -74,14 +74,20 @@ func (c *ClusterTx) GetStoragePoolVolumeWithID(ctx context.Context, volumeID int var response StorageVolumeArgs stmt := ` -SELECT storage_volumes.id, storage_volumes.name, storage_volumes.description, storage_pools.name, storage_pools.type, projects.name +SELECT + storage_volumes.id, + storage_volumes.name, + storage_volumes.description, + storage_volumes.type, + storage_pools.name, + projects.name FROM storage_volumes JOIN storage_pools ON storage_pools.id = storage_volumes.storage_pool_id JOIN projects ON projects.id = storage_volumes.project_id WHERE storage_volumes.id = ? ` - err := c.tx.QueryRowContext(ctx, stmt, volumeID).Scan(&response.ID, &response.Name, &response.Description, &response.PoolName, &response.Type, &response.ProjectName) + err := c.tx.QueryRowContext(ctx, stmt, volumeID).Scan(&response.ID, &response.Name, &response.Description, &response.Type, &response.PoolName, &response.ProjectName) if err != nil { if err == sql.ErrNoRows { return StorageVolumeArgs{}, api.StatusErrorf(http.StatusNotFound, "Storage pool volume not found") From 1cb8fa309de73a2525b2188fd6fade87473ec369 Mon Sep 17 00:00:00 2001 From: Thomas Parrott Date: Fri, 22 Sep 2023 14:27:00 +0100 Subject: [PATCH 284/543] i18n: Update translation templates Signed-off-by: Thomas Parrott --- po/ber.po | 3073 ++++++++++++++++++++++++++--------------------- po/bg.po | 3073 ++++++++++++++++++++++++++--------------------- po/ca.po | 3073 ++++++++++++++++++++++++++--------------------- po/cs.po | 3073 ++++++++++++++++++++++++++--------------------- po/de.po | 3148 +++++++++++++++++++++++++++--------------------- po/el.po | 3087 ++++++++++++++++++++++++++--------------------- po/eo.po | 3073 
++++++++++++++++++++++++++--------------------- po/es.po | 3102 +++++++++++++++++++++++++++--------------------- po/fa.po | 3073 ++++++++++++++++++++++++++--------------------- po/fi.po | 3073 ++++++++++++++++++++++++++--------------------- po/fr.po | 3156 +++++++++++++++++++++++++++--------------------- po/he.po | 3073 ++++++++++++++++++++++++++--------------------- po/hi.po | 3073 ++++++++++++++++++++++++++--------------------- po/id.po | 3073 ++++++++++++++++++++++++++--------------------- po/it.po | 3099 ++++++++++++++++++++++++++--------------------- po/ja.po | 3165 ++++++++++++++++++++++++++++--------------------- po/ko.po | 3073 ++++++++++++++++++++++++++--------------------- po/lxd.pot | 2627 ++++++++++++++++++++++------------------ po/mr.po | 3073 ++++++++++++++++++++++++++--------------------- po/nb_NO.po | 3073 ++++++++++++++++++++++++++--------------------- po/nl.po | 3077 ++++++++++++++++++++++++++--------------------- po/pa.po | 3073 ++++++++++++++++++++++++++--------------------- po/pl.po | 3073 ++++++++++++++++++++++++++--------------------- po/pt_BR.po | 3105 +++++++++++++++++++++++++++--------------------- po/ru.po | 3103 +++++++++++++++++++++++++++--------------------- po/si.po | 3073 ++++++++++++++++++++++++++--------------------- po/sl.po | 3073 ++++++++++++++++++++++++++--------------------- po/sr.po | 3073 ++++++++++++++++++++++++++--------------------- po/sv.po | 3073 ++++++++++++++++++++++++++--------------------- po/te.po | 3073 ++++++++++++++++++++++++++--------------------- po/th.po | 3073 ++++++++++++++++++++++++++--------------------- po/tr.po | 3073 ++++++++++++++++++++++++++--------------------- po/tzm.po | 3073 ++++++++++++++++++++++++++--------------------- po/ug.po | 3073 ++++++++++++++++++++++++++--------------------- po/uk.po | 3073 ++++++++++++++++++++++++++--------------------- po/zh_Hans.po | 3091 ++++++++++++++++++++++++++--------------------- po/zh_Hant.po | 3073 ++++++++++++++++++++++++++--------------------- 37 files 
changed, 63742 insertions(+), 49916 deletions(-) diff --git a/po/ber.po b/po/ber.po index 09091fc8cb56..237b8a3afc12 100644 --- a/po/ber.po +++ b/po/ber.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: lxd\n" "Report-Msgid-Bugs-To: lxd@lists.canonical.com\n" -"POT-Creation-Date: 2023-07-13 07:56+0100\n" +"POT-Creation-Date: 2023-09-22 14:26+0100\n" "PO-Revision-Date: 2022-03-10 15:10+0000\n" "Last-Translator: Anonymous \n" "Language-Team: Berber " msgstr "" -#: lxc/alias.go:55 +#: lxc/alias.go:58 msgid " " msgstr "" -#: lxc/alias.go:145 +#: lxc/alias.go:156 msgid " " msgstr "" -#: lxc/remote.go:818 lxc/remote.go:873 +#: lxc/remote.go:816 lxc/remote.go:871 msgid "" msgstr "" -#: lxc/remote.go:911 +#: lxc/remote.go:909 msgid " " msgstr "" -#: lxc/remote.go:746 +#: lxc/remote.go:745 msgid " " msgstr "" -#: lxc/file.go:458 +#: lxc/file.go:457 msgid "... [:]/" msgstr "" -#: lxc/image.go:647 +#: lxc/image.go:646 msgid "" "|| [] [:] [key=value...]" msgstr "" @@ -407,31 +405,31 @@ msgstr "" msgid "" msgstr "" -#: lxc/alias.go:130 lxc/image.go:1051 lxc/image_alias.go:235 +#: lxc/alias.go:139 lxc/image.go:1050 lxc/image_alias.go:234 msgid "ALIAS" msgstr "" -#: lxc/image.go:1052 +#: lxc/image.go:1051 msgid "ALIASES" msgstr "" -#: lxc/cluster.go:183 lxc/image.go:1057 lxc/list.go:555 +#: lxc/cluster.go:182 lxc/image.go:1056 lxc/list.go:554 msgid "ARCHITECTURE" msgstr "" -#: lxc/remote.go:729 +#: lxc/remote.go:728 msgid "AUTH TYPE" msgstr "" -#: lxc/remote.go:99 +#: lxc/remote.go:98 msgid "Accept certificate" msgstr "" -#: lxc/config.go:375 +#: lxc/config.go:383 msgid "Access the expanded configuration" msgstr "" -#: lxc/warning.go:262 lxc/warning.go:263 +#: lxc/warning.go:261 lxc/warning.go:262 msgid "Acknowledge warning" msgstr "" @@ -444,11 +442,15 @@ msgstr "" msgid "Action (defaults to GET)" msgstr "" -#: lxc/network_zone.go:1163 +#: lxc/cluster_group.go:631 +msgid "Add a cluster member to a cluster group" +msgstr "" + +#: lxc/network_zone.go:1239 msgid "Add a 
network zone record entry" msgstr "" -#: lxc/network_zone.go:1164 +#: lxc/network_zone.go:1240 msgid "Add entries to a network zone record" msgstr "" @@ -456,15 +458,19 @@ msgstr "" msgid "Add instance devices" msgstr "" -#: lxc/alias.go:56 lxc/alias.go:57 +#: lxc/cluster_group.go:630 +msgid "Add member to group" +msgstr "" + +#: lxc/alias.go:59 lxc/alias.go:60 msgid "Add new aliases" msgstr "" -#: lxc/remote.go:88 +#: lxc/remote.go:87 msgid "Add new remote servers" msgstr "" -#: lxc/remote.go:89 +#: lxc/remote.go:88 msgid "" "Add new remote servers\n" "\n" @@ -476,11 +482,11 @@ msgid "" "protocol=simplestreams\n" msgstr "" -#: lxc/config_trust.go:87 +#: lxc/config_trust.go:86 msgid "Add new trusted client" msgstr "" -#: lxc/config_trust.go:88 +#: lxc/config_trust.go:87 msgid "" "Add new trusted client\n" "\n" @@ -497,7 +503,7 @@ msgid "" "restricted to one or more projects.\n" msgstr "" -#: lxc/network_forward.go:706 lxc/network_forward.go:707 +#: lxc/network_forward.go:744 lxc/network_forward.go:745 msgid "Add ports to a forward" msgstr "" @@ -505,65 +511,70 @@ msgstr "" msgid "Add profiles to instances" msgstr "" -#: lxc/cluster_role.go:47 lxc/cluster_role.go:48 +#: lxc/cluster_role.go:49 lxc/cluster_role.go:50 msgid "Add roles to a cluster member" msgstr "" -#: lxc/network_acl.go:727 lxc/network_acl.go:728 +#: lxc/network_acl.go:765 lxc/network_acl.go:766 msgid "Add rules to an ACL" msgstr "" -#: lxc/info.go:202 +#: lxc/info.go:201 #, c-format msgid "Address: %s" msgstr "" -#: lxc/remote.go:558 +#: lxc/remote.go:557 #, c-format msgid "Admin password (or token) for %s:" msgstr "" -#: lxc/alias.go:80 lxc/alias.go:177 +#: lxc/alias.go:85 lxc/alias.go:190 #, c-format msgid "Alias %s already exists" msgstr "" -#: lxc/alias.go:171 lxc/alias.go:222 +#: lxc/alias.go:184 lxc/alias.go:239 #, c-format msgid "Alias %s doesn't exist" msgstr "" -#: lxc/image_alias.go:85 lxc/image_alias.go:132 lxc/image_alias.go:280 +#: lxc/image_alias.go:84 lxc/image_alias.go:131 
lxc/image_alias.go:279 msgid "Alias name missing" msgstr "" -#: lxc/image.go:972 +#: lxc/publish.go:242 +#, c-format +msgid "Aliases already exists: %s" +msgstr "" + +#: lxc/image.go:971 msgid "Aliases:" msgstr "" -#: lxc/storage_volume.go:1353 +#: lxc/storage_volume.go:1397 msgid "All projects" msgstr "" -#: lxc/remote.go:180 +#: lxc/remote.go:179 msgid "All server addresses are unavailable" msgstr "" -#: lxc/config_trust.go:103 +#: lxc/config_trust.go:102 msgid "Alternative certificate name" msgstr "" -#: lxc/image.go:943 lxc/info.go:477 +#: lxc/image.go:942 lxc/info.go:476 #, c-format msgid "Architecture: %s" msgstr "" -#: lxc/info.go:128 +#: lxc/info.go:127 #, c-format msgid "Architecture: %v" msgstr "" -#: lxc/cluster.go:1123 +#: lxc/cluster.go:1166 #, c-format msgid "Are you sure you want to %s cluster member %q? (yes/no) [default=no]: " msgstr "" @@ -572,11 +583,11 @@ msgstr "" msgid "As neither could be found, the raw SPICE socket can be found at:" msgstr "" -#: lxc/init.go:371 +#: lxc/init.go:345 msgid "Asked for a VM but image is of type container" msgstr "" -#: lxc/cluster_group.go:78 lxc/cluster_group.go:79 +#: lxc/cluster_group.go:83 lxc/cluster_group.go:84 msgid "Assign sets of groups to cluster members" msgstr "" @@ -584,23 +595,23 @@ msgstr "" msgid "Assign sets of profiles to instances" msgstr "" -#: lxc/network.go:127 +#: lxc/network.go:126 msgid "Attach network interfaces to instances" msgstr "" -#: lxc/network.go:212 lxc/network.go:213 +#: lxc/network.go:211 lxc/network.go:212 msgid "Attach network interfaces to profiles" msgstr "" -#: lxc/network.go:128 +#: lxc/network.go:127 msgid "Attach new network interfaces to instances" msgstr "" -#: lxc/storage_volume.go:163 lxc/storage_volume.go:164 +#: lxc/storage_volume.go:164 lxc/storage_volume.go:165 msgid "Attach new storage volumes to instances" msgstr "" -#: lxc/storage_volume.go:238 lxc/storage_volume.go:239 +#: lxc/storage_volume.go:239 lxc/storage_volume.go:240 msgid "Attach new storage 
volumes to profiles" msgstr "" @@ -616,30 +627,30 @@ msgid "" "as well as retrieve past log entries from it." msgstr "" -#: lxc/remote.go:544 +#: lxc/remote.go:543 #, c-format msgid "Authentication type '%s' not supported by server" msgstr "" -#: lxc/info.go:221 +#: lxc/info.go:220 #, c-format msgid "Auto negotiation: %v" msgstr "" -#: lxc/image.go:173 +#: lxc/image.go:172 msgid "Auto update is only available in pull mode" msgstr "" -#: lxc/image.go:982 +#: lxc/image.go:981 #, c-format msgid "Auto update: %s" msgstr "" -#: lxc/remote.go:135 +#: lxc/remote.go:134 msgid "Available projects:" msgstr "" -#: lxc/list.go:561 lxc/list.go:562 +#: lxc/list.go:560 lxc/list.go:561 msgid "BASE IMAGE" msgstr "" @@ -648,152 +659,152 @@ msgstr "" msgid "Backing up instance: %s" msgstr "" -#: lxc/storage_volume.go:2151 +#: lxc/storage_volume.go:2275 #, c-format msgid "Backing up storage volume: %s" msgstr "" -#: lxc/export.go:166 lxc/storage_volume.go:2220 +#: lxc/export.go:192 lxc/storage_volume.go:2352 msgid "Backup exported successfully!" 
msgstr "" -#: lxc/info.go:645 lxc/storage_volume.go:1284 +#: lxc/info.go:644 lxc/storage_volume.go:1328 msgid "Backups:" msgstr "" -#: lxc/utils.go:96 +#: lxc/utils.go:97 #, c-format msgid "Bad device override syntax, expecting ,=: %s" msgstr "" -#: lxc/network.go:321 lxc/network_acl.go:369 lxc/network_forward.go:285 -#: lxc/network_peer.go:281 lxc/network_zone.go:312 lxc/network_zone.go:871 +#: lxc/network.go:325 lxc/network_acl.go:382 lxc/network_forward.go:284 +#: lxc/network_peer.go:280 lxc/network_zone.go:324 lxc/network_zone.go:922 #, c-format msgid "Bad key/value pair: %s" msgstr "" -#: lxc/copy.go:136 lxc/init.go:215 lxc/project.go:129 lxc/publish.go:178 -#: lxc/storage.go:127 lxc/storage_volume.go:569 +#: lxc/copy.go:135 lxc/init.go:214 lxc/project.go:128 lxc/publish.go:179 +#: lxc/storage.go:126 lxc/storage_volume.go:574 #, c-format msgid "Bad key=value pair: %s" msgstr "" -#: lxc/image.go:754 +#: lxc/image.go:753 #, c-format msgid "Bad property: %s" msgstr "" -#: lxc/network.go:834 +#: lxc/network.go:851 msgid "Bond:" msgstr "" -#: lxc/action.go:136 lxc/action.go:309 +#: lxc/action.go:148 lxc/action.go:325 msgid "Both --all and instance name given" msgstr "" -#: lxc/info.go:129 +#: lxc/info.go:128 #, c-format msgid "Brand: %v" msgstr "" -#: lxc/network.go:847 +#: lxc/network.go:864 msgid "Bridge:" msgstr "" -#: lxc/info.go:568 lxc/network.go:826 +#: lxc/info.go:567 lxc/network.go:843 msgid "Bytes received" msgstr "" -#: lxc/info.go:569 lxc/network.go:827 +#: lxc/info.go:568 lxc/network.go:844 msgid "Bytes sent" msgstr "" -#: lxc/operation.go:166 +#: lxc/operation.go:165 msgid "CANCELABLE" msgstr "" -#: lxc/config_trust.go:404 +#: lxc/config_trust.go:410 msgid "COMMON NAME" msgstr "" -#: lxc/storage_volume.go:1460 +#: lxc/storage_volume.go:1504 msgid "CONTENT-TYPE" msgstr "" -#: lxc/warning.go:210 +#: lxc/warning.go:209 msgid "COUNT" msgstr "" -#: lxc/info.go:355 +#: lxc/info.go:354 #, c-format msgid "CPU (%s):" msgstr "" -#: lxc/list.go:573 +#: 
lxc/list.go:572 msgid "CPU USAGE" msgstr "" -#: lxc/info.go:518 +#: lxc/info.go:517 msgid "CPU usage (in seconds)" msgstr "" -#: lxc/info.go:522 +#: lxc/info.go:521 msgid "CPU usage:" msgstr "" -#: lxc/info.go:358 +#: lxc/info.go:357 #, c-format msgid "CPUs (%s):" msgstr "" -#: lxc/operation.go:167 +#: lxc/operation.go:166 msgid "CREATED" msgstr "" -#: lxc/list.go:557 +#: lxc/list.go:556 msgid "CREATED AT" msgstr "" -#: lxc/info.go:131 +#: lxc/info.go:130 #, c-format msgid "CUDA Version: %v" msgstr "" -#: lxc/image.go:981 +#: lxc/image.go:980 #, c-format msgid "Cached: %s" msgstr "" -#: lxc/info.go:308 +#: lxc/info.go:307 msgid "Caches:" msgstr "" -#: lxc/move.go:117 +#: lxc/move.go:116 msgid "Can't override configuration or profiles in local rename" msgstr "" -#: lxc/image.go:206 +#: lxc/image.go:205 msgid "Can't provide a name for the target image" msgstr "" -#: lxc/file.go:333 +#: lxc/file.go:332 msgid "Can't pull a directory without --recursive" msgstr "" -#: lxc/utils.go:170 lxc/utils.go:190 +#: lxc/utils.go:210 lxc/utils.go:230 #, c-format msgid "Can't read from stdin: %w" msgstr "" -#: lxc/remote.go:853 +#: lxc/remote.go:851 msgid "Can't remove the default remote" msgstr "" -#: lxc/list.go:587 +#: lxc/list.go:586 msgid "Can't specify --fast with --columns" msgstr "" -#: lxc/list.go:460 +#: lxc/list.go:459 msgid "Can't specify --project with --all-projects" msgstr "" @@ -801,20 +812,20 @@ msgstr "" msgid "Can't specify a different remote for rename" msgstr "" -#: lxc/list.go:603 lxc/storage_volume.go:1470 lxc/warning.go:225 +#: lxc/list.go:602 lxc/storage_volume.go:1514 lxc/warning.go:224 msgid "Can't specify column L when not clustered" msgstr "" -#: lxc/file.go:539 +#: lxc/file.go:538 msgid "Can't supply uid/gid/mode in recursive mode" msgstr "" -#: lxc/config.go:558 +#: lxc/config.go:660 #, c-format msgid "Can't unset key '%s', it's not currently set" msgstr "" -#: lxc/remote.go:104 +#: lxc/remote.go:103 msgid "Candid domain to use" msgstr "" @@ -824,60 
+835,60 @@ msgid "" "Cannot override config for device %q: Device not found in profile devices" msgstr "" -#: lxc/storage_volume.go:406 +#: lxc/storage_volume.go:411 msgid "" "Cannot set --destination-target when destination server is not clustered" msgstr "" -#: lxc/storage_volume.go:368 +#: lxc/storage_volume.go:369 msgid "Cannot set --target when source server is not clustered" msgstr "" -#: lxc/network_acl.go:781 +#: lxc/network_acl.go:819 #, c-format msgid "Cannot set key: %s" msgstr "" -#: lxc/config_trust.go:184 +#: lxc/config_trust.go:190 msgid "Cannot use metrics type certificate when using a token" msgstr "" -#: lxc/info.go:402 lxc/info.go:414 +#: lxc/info.go:401 lxc/info.go:413 #, c-format msgid "Card %d:" msgstr "" -#: lxc/info.go:114 +#: lxc/info.go:113 #, c-format msgid "Card: %s (%s)" msgstr "" -#: lxc/config_trust.go:624 +#: lxc/config_trust.go:630 #, c-format msgid "Certificate add token for %s deleted" msgstr "" -#: lxc/remote.go:217 +#: lxc/remote.go:216 #, c-format msgid "" "Certificate fingerprint mismatch between certificate token and server %q" msgstr "" -#: lxc/remote.go:445 +#: lxc/remote.go:444 #, c-format msgid "Certificate fingerprint: %s" msgstr "" -#: lxc/network.go:868 +#: lxc/network.go:885 msgid "Chassis" msgstr "" -#: lxc/config_trust.go:208 +#: lxc/config_trust.go:214 #, c-format msgid "Client %s certificate add token:" msgstr "" -#: lxc/remote.go:598 +#: lxc/remote.go:597 msgid "Client certificate now trusted by server:" msgstr "" @@ -886,67 +897,85 @@ msgstr "" msgid "Client version: %s\n" msgstr "" -#: lxc/cluster_group.go:188 +#: lxc/cluster_group.go:196 #, c-format msgid "Cluster group %s created" msgstr "" -#: lxc/cluster_group.go:239 +#: lxc/cluster_group.go:249 #, c-format msgid "Cluster group %s deleted" msgstr "" -#: lxc/cluster_group.go:474 +#: lxc/cluster_group.go:491 #, c-format msgid "Cluster group %s isn't currently applied to %s" msgstr "" -#: lxc/cluster_group.go:541 +#: lxc/cluster_group.go:560 #, c-format msgid 
"Cluster group %s renamed to %s" msgstr "" -#: lxc/cluster.go:944 +#: lxc/cluster.go:987 #, c-format msgid "Cluster join token for %s:%s deleted" msgstr "" -#: lxc/cluster_group.go:134 +#: lxc/cluster_group.go:140 #, c-format msgid "Cluster member %s added to cluster groups %s" msgstr "" -#: lxc/cluster_group.go:494 +#: lxc/cluster_group.go:676 +#, c-format +msgid "Cluster member %s added to group %s" +msgstr "" + +#: lxc/cluster_group.go:665 +#, c-format +msgid "Cluster member %s is already in group %s" +msgstr "" + +#: lxc/cluster_group.go:511 #, c-format msgid "Cluster member %s removed from group %s" msgstr "" -#: lxc/cluster.go:714 lxc/config.go:98 lxc/config.go:376 lxc/config.go:479 -#: lxc/config.go:626 lxc/config.go:745 lxc/copy.go:61 lxc/info.go:46 -#: lxc/init.go:56 lxc/move.go:66 lxc/network.go:288 lxc/network.go:709 -#: lxc/network.go:767 lxc/network.go:1107 lxc/network.go:1174 -#: lxc/network.go:1236 lxc/network_forward.go:171 lxc/network_forward.go:235 -#: lxc/network_forward.go:390 lxc/network_forward.go:491 -#: lxc/network_forward.go:633 lxc/network_forward.go:710 -#: lxc/network_forward.go:776 lxc/storage.go:96 lxc/storage.go:342 -#: lxc/storage.go:403 lxc/storage.go:638 lxc/storage.go:710 lxc/storage.go:793 -#: lxc/storage_volume.go:334 lxc/storage_volume.go:528 -#: lxc/storage_volume.go:607 lxc/storage_volume.go:848 -#: lxc/storage_volume.go:1049 lxc/storage_volume.go:1137 -#: lxc/storage_volume.go:1564 lxc/storage_volume.go:1596 -#: lxc/storage_volume.go:1712 lxc/storage_volume.go:1803 -#: lxc/storage_volume.go:1896 lxc/storage_volume.go:1933 -#: lxc/storage_volume.go:2026 lxc/storage_volume.go:2098 -#: lxc/storage_volume.go:2240 +#: lxc/config.go:101 lxc/config.go:385 lxc/config.go:528 lxc/config.go:734 +#: lxc/config.go:857 lxc/copy.go:60 lxc/info.go:45 lxc/init.go:55 +#: lxc/move.go:65 lxc/network.go:292 lxc/network.go:715 lxc/network.go:784 +#: lxc/network.go:1126 lxc/network.go:1211 lxc/network.go:1275 +#: lxc/network_forward.go:170 
lxc/network_forward.go:234 +#: lxc/network_forward.go:406 lxc/network_forward.go:529 +#: lxc/network_forward.go:671 lxc/network_forward.go:748 +#: lxc/network_forward.go:814 lxc/storage.go:95 lxc/storage.go:343 +#: lxc/storage.go:414 lxc/storage.go:667 lxc/storage.go:757 lxc/storage.go:842 +#: lxc/storage_volume.go:335 lxc/storage_volume.go:533 +#: lxc/storage_volume.go:612 lxc/storage_volume.go:856 +#: lxc/storage_volume.go:1070 lxc/storage_volume.go:1183 +#: lxc/storage_volume.go:1608 lxc/storage_volume.go:1640 +#: lxc/storage_volume.go:1767 lxc/storage_volume.go:1913 +#: lxc/storage_volume.go:2017 lxc/storage_volume.go:2057 +#: lxc/storage_volume.go:2150 lxc/storage_volume.go:2222 +#: lxc/storage_volume.go:2372 msgid "Cluster member name" msgstr "" -#: lxc/cluster.go:584 +#: lxc/cluster.go:752 +msgid "Cluster member name (alternative to passing it as an argument)" +msgstr "" + +#: lxc/cluster.go:776 +msgid "Cluster member name was provided as both a flag and as an argument" +msgstr "" + +#: lxc/cluster.go:622 msgid "Clustering enabled" msgstr "" -#: lxc/image.go:1042 lxc/list.go:133 lxc/storage_volume.go:1352 -#: lxc/warning.go:93 +#: lxc/image.go:1041 lxc/list.go:132 lxc/storage_volume.go:1396 +#: lxc/warning.go:92 msgid "Columns" msgstr "" @@ -970,24 +999,24 @@ msgstr "" msgid "Compression algorithm to use (none for uncompressed)" msgstr "" -#: lxc/copy.go:53 lxc/init.go:49 +#: lxc/copy.go:52 lxc/init.go:48 msgid "Config key/value to apply to the new instance" msgstr "" -#: lxc/project.go:96 +#: lxc/project.go:95 msgid "Config key/value to apply to the new project" msgstr "" -#: lxc/move.go:58 +#: lxc/move.go:57 msgid "Config key/value to apply to the target instance" msgstr "" -#: lxc/cluster.go:679 lxc/cluster_group.go:327 lxc/config.go:263 -#: lxc/config.go:338 lxc/config_metadata.go:147 lxc/config_trust.go:308 -#: lxc/image.go:454 lxc/network.go:674 lxc/network_acl.go:582 -#: lxc/network_forward.go:597 lxc/network_peer.go:573 lxc/network_zone.go:513 -#: 
lxc/network_zone.go:1067 lxc/profile.go:508 lxc/project.go:312 -#: lxc/storage.go:307 lxc/storage_volume.go:981 lxc/storage_volume.go:1013 +#: lxc/cluster.go:717 lxc/cluster_group.go:339 lxc/config.go:268 +#: lxc/config.go:343 lxc/config_metadata.go:147 lxc/config_trust.go:314 +#: lxc/image.go:453 lxc/network.go:678 lxc/network_acl.go:620 +#: lxc/network_forward.go:635 lxc/network_peer.go:610 lxc/network_zone.go:551 +#: lxc/network_zone.go:1143 lxc/profile.go:518 lxc/project.go:314 +#: lxc/storage.go:306 lxc/storage_volume.go:989 lxc/storage_volume.go:1021 #, c-format msgid "Config parsing error: %s" msgstr "" @@ -996,33 +1025,33 @@ msgstr "" msgid "Console log:" msgstr "" -#: lxc/storage_volume.go:529 +#: lxc/storage_volume.go:534 msgid "Content type, block or filesystem" msgstr "" -#: lxc/storage_volume.go:1228 +#: lxc/storage_volume.go:1272 #, c-format msgid "Content type: %s" msgstr "" -#: lxc/info.go:118 +#: lxc/info.go:117 #, c-format msgid "Control: %s (%s)" msgstr "" -#: lxc/copy.go:59 lxc/move.go:64 +#: lxc/copy.go:58 lxc/move.go:63 msgid "Copy a stateful instance stateless" msgstr "" -#: lxc/image.go:152 +#: lxc/image.go:151 msgid "Copy aliases from source" msgstr "" -#: lxc/image.go:144 +#: lxc/image.go:143 msgid "Copy images between servers" msgstr "" -#: lxc/image.go:145 +#: lxc/image.go:144 msgid "" "Copy images between servers\n" "\n" @@ -1030,11 +1059,11 @@ msgid "" "It requires the source to be an alias and for it to be public." 
msgstr "" -#: lxc/copy.go:40 +#: lxc/copy.go:39 msgid "Copy instances within or in between LXD servers" msgstr "" -#: lxc/copy.go:41 +#: lxc/copy.go:40 msgid "" "Copy instances within or in between LXD servers\n" "\n" @@ -1050,118 +1079,118 @@ msgid "" "versions.\n" msgstr "" -#: lxc/config_device.go:351 lxc/config_device.go:352 +#: lxc/config_device.go:355 lxc/config_device.go:356 msgid "Copy profile inherited devices and override configuration keys" msgstr "" -#: lxc/profile.go:248 lxc/profile.go:249 +#: lxc/profile.go:249 lxc/profile.go:250 msgid "Copy profiles" msgstr "" -#: lxc/storage_volume.go:329 lxc/storage_volume.go:330 +#: lxc/storage_volume.go:330 lxc/storage_volume.go:331 msgid "Copy storage volumes" msgstr "" -#: lxc/copy.go:58 +#: lxc/copy.go:57 msgid "Copy the instance without its snapshots" msgstr "" -#: lxc/storage_volume.go:336 +#: lxc/storage_volume.go:337 msgid "Copy the volume without its snapshots" msgstr "" -#: lxc/copy.go:62 lxc/image.go:157 lxc/move.go:67 lxc/profile.go:251 -#: lxc/storage_volume.go:337 +#: lxc/copy.go:61 lxc/image.go:156 lxc/move.go:66 lxc/profile.go:252 +#: lxc/storage_volume.go:338 msgid "Copy to a project different from the source" msgstr "" -#: lxc/image.go:155 +#: lxc/image.go:154 msgid "Copy virtual machine images" msgstr "" -#: lxc/image.go:261 +#: lxc/image.go:260 #, c-format msgid "Copying the image: %s" msgstr "" -#: lxc/storage_volume.go:428 +#: lxc/storage_volume.go:433 #, c-format msgid "Copying the storage volume: %s" msgstr "" -#: lxc/info.go:316 +#: lxc/info.go:315 #, c-format msgid "Core %d" msgstr "" -#: lxc/info.go:314 +#: lxc/info.go:313 msgid "Cores:" msgstr "" -#: lxc/remote.go:480 +#: lxc/remote.go:479 #, c-format msgid "Could not close server cert file %q: %w" msgstr "" -#: lxc/remote.go:223 lxc/remote.go:464 +#: lxc/remote.go:222 lxc/remote.go:463 msgid "Could not create server cert dir" msgstr "" -#: lxc/cluster.go:1008 +#: lxc/cluster.go:1051 #, c-format msgid "Could not find certificate file 
path: %s" msgstr "" -#: lxc/cluster.go:1012 +#: lxc/cluster.go:1055 #, c-format msgid "Could not find certificate key file path: %s" msgstr "" -#: lxc/cluster.go:1017 +#: lxc/cluster.go:1060 #, c-format msgid "Could not read certificate file: %s with error: %v" msgstr "" -#: lxc/cluster.go:1022 +#: lxc/cluster.go:1065 #, c-format msgid "Could not read certificate key file: %s with error: %v" msgstr "" -#: lxc/cluster.go:1039 +#: lxc/cluster.go:1082 #, c-format msgid "Could not write new remote certificate for remote '%s' with error: %v" msgstr "" -#: lxc/remote.go:475 +#: lxc/remote.go:474 #, c-format msgid "Could not write server cert file %q: %w" msgstr "" -#: lxc/network_zone.go:1251 +#: lxc/network_zone.go:1327 msgid "Couldn't find a matching entry" msgstr "" -#: lxc/cluster_group.go:149 lxc/cluster_group.go:150 +#: lxc/cluster_group.go:156 lxc/cluster_group.go:157 msgid "Create a cluster group" msgstr "" -#: lxc/init.go:59 +#: lxc/init.go:58 msgid "Create a virtual machine" msgstr "" -#: lxc/image_alias.go:60 lxc/image_alias.go:61 +#: lxc/image_alias.go:59 lxc/image_alias.go:60 msgid "Create aliases for existing images" msgstr "" -#: lxc/init.go:58 +#: lxc/init.go:57 msgid "Create an empty instance" msgstr "" -#: lxc/launch.go:24 lxc/launch.go:25 +#: lxc/launch.go:23 lxc/launch.go:24 msgid "Create and start instances from images" msgstr "" -#: lxc/file.go:244 lxc/file.go:467 +#: lxc/file.go:243 lxc/file.go:466 msgid "Create any directories necessary" msgstr "" @@ -1177,135 +1206,135 @@ msgid "" "running state, including process memory state, TCP connections, ..." 
msgstr "" -#: lxc/init.go:40 lxc/init.go:41 +#: lxc/init.go:39 lxc/init.go:40 msgid "Create instances from images" msgstr "" -#: lxc/storage_volume.go:524 lxc/storage_volume.go:525 +#: lxc/storage_volume.go:529 lxc/storage_volume.go:530 msgid "Create new custom storage volumes" msgstr "" -#: lxc/config_template.go:67 lxc/config_template.go:68 +#: lxc/config_template.go:66 lxc/config_template.go:67 msgid "Create new instance file templates" msgstr "" -#: lxc/network_acl.go:313 lxc/network_acl.go:314 +#: lxc/network_acl.go:326 lxc/network_acl.go:327 msgid "Create new network ACLs" msgstr "" -#: lxc/network_forward.go:231 lxc/network_forward.go:232 +#: lxc/network_forward.go:230 lxc/network_forward.go:231 msgid "Create new network forwards" msgstr "" -#: lxc/network_peer.go:215 lxc/network_peer.go:216 +#: lxc/network_peer.go:214 lxc/network_peer.go:215 msgid "Create new network peering" msgstr "" -#: lxc/network_zone.go:818 lxc/network_zone.go:819 +#: lxc/network_zone.go:869 lxc/network_zone.go:870 msgid "Create new network zone record" msgstr "" -#: lxc/network_zone.go:258 lxc/network_zone.go:259 +#: lxc/network_zone.go:270 lxc/network_zone.go:271 msgid "Create new network zones" msgstr "" -#: lxc/network.go:285 lxc/network.go:286 +#: lxc/network.go:284 lxc/network.go:285 msgid "Create new networks" msgstr "" -#: lxc/profile.go:309 lxc/profile.go:310 +#: lxc/profile.go:319 lxc/profile.go:320 msgid "Create profiles" msgstr "" -#: lxc/project.go:93 lxc/project.go:94 +#: lxc/project.go:92 lxc/project.go:93 msgid "Create projects" msgstr "" -#: lxc/storage.go:92 lxc/storage.go:93 +#: lxc/storage.go:91 lxc/storage.go:92 msgid "Create storage pools" msgstr "" -#: lxc/copy.go:63 lxc/init.go:57 +#: lxc/copy.go:62 lxc/init.go:56 msgid "Create the instance with no profiles applied" msgstr "" -#: lxc/image.go:950 lxc/info.go:488 +#: lxc/image.go:949 lxc/info.go:487 #, c-format msgid "Created: %s" msgstr "" -#: lxc/init.go:162 +#: lxc/init.go:161 #, c-format msgid "Creating %s" 
msgstr "" -#: lxc/init.go:160 +#: lxc/init.go:159 msgid "Creating the instance" msgstr "" -#: lxc/info.go:138 lxc/info.go:247 +#: lxc/info.go:137 lxc/info.go:246 #, c-format msgid "Current number of VFs: %d" msgstr "" -#: lxc/network_forward.go:147 +#: lxc/network_forward.go:146 msgid "DEFAULT TARGET ADDRESS" msgstr "" -#: lxc/cluster.go:185 lxc/cluster_group.go:423 lxc/image.go:1056 -#: lxc/image_alias.go:238 lxc/list.go:558 lxc/network.go:960 -#: lxc/network_acl.go:149 lxc/network_forward.go:146 lxc/network_peer.go:141 -#: lxc/network_zone.go:140 lxc/network_zone.go:704 lxc/operation.go:164 -#: lxc/profile.go:634 lxc/project.go:477 lxc/storage.go:615 -#: lxc/storage_volume.go:1459 +#: lxc/cluster.go:184 lxc/cluster_group.go:438 lxc/image.go:1055 +#: lxc/image_alias.go:237 lxc/list.go:557 lxc/network.go:977 +#: lxc/network_acl.go:148 lxc/network_forward.go:145 lxc/network_peer.go:140 +#: lxc/network_zone.go:139 lxc/network_zone.go:742 lxc/operation.go:163 +#: lxc/profile.go:658 lxc/project.go:492 lxc/storage.go:642 +#: lxc/storage_volume.go:1503 msgid "DESCRIPTION" msgstr "" -#: lxc/list.go:559 +#: lxc/list.go:558 msgid "DISK USAGE" msgstr "" -#: lxc/storage.go:608 +#: lxc/storage.go:635 msgid "DRIVER" msgstr "" -#: lxc/info.go:110 +#: lxc/info.go:109 msgid "DRM:" msgstr "" -#: lxc/network.go:851 +#: lxc/network.go:868 msgid "Default VLAN ID" msgstr "" -#: lxc/storage_volume.go:2097 +#: lxc/storage_volume.go:2221 msgid "Define a compression algorithm: for backup or none" msgstr "" -#: lxc/operation.go:55 lxc/operation.go:56 +#: lxc/operation.go:54 lxc/operation.go:55 msgid "Delete a background operation (will attempt to cancel)" msgstr "" -#: lxc/cluster_group.go:204 lxc/cluster_group.go:205 +#: lxc/cluster_group.go:213 lxc/cluster_group.go:214 msgid "Delete a cluster group" msgstr "" -#: lxc/warning.go:361 +#: lxc/warning.go:360 msgid "Delete all warnings" msgstr "" -#: lxc/file.go:118 lxc/file.go:119 +#: lxc/file.go:117 lxc/file.go:118 msgid "Delete files in 
instances" msgstr "" -#: lxc/image_alias.go:107 lxc/image_alias.go:108 +#: lxc/image_alias.go:106 lxc/image_alias.go:107 msgid "Delete image aliases" msgstr "" -#: lxc/image.go:309 lxc/image.go:310 +#: lxc/image.go:308 lxc/image.go:309 msgid "Delete images" msgstr "" -#: lxc/config_template.go:110 lxc/config_template.go:111 +#: lxc/config_template.go:109 lxc/config_template.go:110 msgid "Delete instance file templates" msgstr "" @@ -1313,211 +1342,212 @@ msgstr "" msgid "Delete instances and snapshots" msgstr "" -#: lxc/network_acl.go:663 lxc/network_acl.go:664 +#: lxc/network_acl.go:701 lxc/network_acl.go:702 msgid "Delete network ACLs" msgstr "" -#: lxc/network_forward.go:629 lxc/network_forward.go:630 +#: lxc/network_forward.go:667 lxc/network_forward.go:668 msgid "Delete network forwards" msgstr "" -#: lxc/network_peer.go:605 lxc/network_peer.go:606 +#: lxc/network_peer.go:642 lxc/network_peer.go:643 msgid "Delete network peerings" msgstr "" -#: lxc/network_zone.go:1099 lxc/network_zone.go:1100 +#: lxc/network_zone.go:1175 lxc/network_zone.go:1176 msgid "Delete network zone record" msgstr "" -#: lxc/network_zone.go:545 lxc/network_zone.go:546 +#: lxc/network_zone.go:583 lxc/network_zone.go:584 msgid "Delete network zones" msgstr "" -#: lxc/network.go:359 lxc/network.go:360 +#: lxc/network.go:363 lxc/network.go:364 msgid "Delete networks" msgstr "" -#: lxc/profile.go:363 lxc/profile.go:364 +#: lxc/profile.go:373 lxc/profile.go:374 msgid "Delete profiles" msgstr "" -#: lxc/project.go:158 lxc/project.go:159 +#: lxc/project.go:157 lxc/project.go:158 msgid "Delete projects" msgstr "" -#: lxc/storage.go:166 lxc/storage.go:167 +#: lxc/storage.go:165 lxc/storage.go:166 msgid "Delete storage pools" msgstr "" -#: lxc/storage_volume.go:603 lxc/storage_volume.go:604 +#: lxc/storage_volume.go:608 lxc/storage_volume.go:609 msgid "Delete storage volumes" msgstr "" -#: lxc/warning.go:357 lxc/warning.go:358 +#: lxc/warning.go:356 lxc/warning.go:357 msgid "Delete warning" msgstr 
"" -#: lxc/action.go:31 lxc/action.go:50 lxc/action.go:70 lxc/action.go:91 -#: lxc/alias.go:22 lxc/alias.go:57 lxc/alias.go:103 lxc/alias.go:148 -#: lxc/alias.go:199 lxc/cluster.go:30 lxc/cluster.go:119 lxc/cluster.go:203 -#: lxc/cluster.go:252 lxc/cluster.go:299 lxc/cluster.go:351 lxc/cluster.go:380 -#: lxc/cluster.go:430 lxc/cluster.go:513 lxc/cluster.go:598 lxc/cluster.go:713 -#: lxc/cluster.go:784 lxc/cluster.go:886 lxc/cluster.go:965 lxc/cluster.go:1071 -#: lxc/cluster.go:1090 lxc/cluster_group.go:30 lxc/cluster_group.go:79 -#: lxc/cluster_group.go:150 lxc/cluster_group.go:205 lxc/cluster_group.go:255 -#: lxc/cluster_group.go:368 lxc/cluster_group.go:440 lxc/cluster_group.go:511 -#: lxc/cluster_group.go:557 lxc/cluster_role.go:22 lxc/cluster_role.go:48 -#: lxc/cluster_role.go:102 lxc/config.go:30 lxc/config.go:92 lxc/config.go:372 -#: lxc/config.go:464 lxc/config.go:622 lxc/config.go:742 -#: lxc/config_device.go:24 lxc/config_device.go:78 lxc/config_device.go:204 -#: lxc/config_device.go:281 lxc/config_device.go:352 lxc/config_device.go:446 -#: lxc/config_device.go:544 lxc/config_device.go:551 lxc/config_device.go:664 -#: lxc/config_device.go:737 lxc/config_metadata.go:27 lxc/config_metadata.go:55 -#: lxc/config_metadata.go:180 lxc/config_template.go:28 -#: lxc/config_template.go:68 lxc/config_template.go:111 -#: lxc/config_template.go:153 lxc/config_template.go:241 -#: lxc/config_template.go:301 lxc/config_trust.go:35 lxc/config_trust.go:88 -#: lxc/config_trust.go:230 lxc/config_trust.go:344 lxc/config_trust.go:426 -#: lxc/config_trust.go:528 lxc/config_trust.go:574 lxc/config_trust.go:645 -#: lxc/console.go:36 lxc/copy.go:41 lxc/delete.go:31 lxc/exec.go:41 -#: lxc/export.go:32 lxc/file.go:79 lxc/file.go:119 lxc/file.go:168 -#: lxc/file.go:238 lxc/file.go:460 lxc/file.go:968 lxc/image.go:38 -#: lxc/image.go:145 lxc/image.go:310 lxc/image.go:361 lxc/image.go:488 -#: lxc/image.go:649 lxc/image.go:882 lxc/image.go:1017 lxc/image.go:1336 -#: lxc/image.go:1416 
lxc/image.go:1475 lxc/image.go:1527 lxc/image.go:1583 -#: lxc/image_alias.go:25 lxc/image_alias.go:61 lxc/image_alias.go:108 -#: lxc/image_alias.go:153 lxc/image_alias.go:256 lxc/import.go:29 -#: lxc/info.go:34 lxc/init.go:41 lxc/launch.go:25 lxc/list.go:50 lxc/main.go:79 -#: lxc/manpage.go:22 lxc/monitor.go:33 lxc/move.go:37 lxc/network.go:33 -#: lxc/network.go:128 lxc/network.go:213 lxc/network.go:286 lxc/network.go:360 -#: lxc/network.go:410 lxc/network.go:495 lxc/network.go:580 lxc/network.go:706 -#: lxc/network.go:764 lxc/network.go:887 lxc/network.go:980 lxc/network.go:1051 -#: lxc/network.go:1101 lxc/network.go:1171 lxc/network.go:1233 -#: lxc/network_acl.go:30 lxc/network_acl.go:95 lxc/network_acl.go:166 -#: lxc/network_acl.go:219 lxc/network_acl.go:265 lxc/network_acl.go:314 -#: lxc/network_acl.go:397 lxc/network_acl.go:457 lxc/network_acl.go:484 -#: lxc/network_acl.go:615 lxc/network_acl.go:664 lxc/network_acl.go:713 -#: lxc/network_acl.go:728 lxc/network_acl.go:849 lxc/network_forward.go:30 -#: lxc/network_forward.go:87 lxc/network_forward.go:168 -#: lxc/network_forward.go:232 lxc/network_forward.go:328 -#: lxc/network_forward.go:383 lxc/network_forward.go:461 -#: lxc/network_forward.go:488 lxc/network_forward.go:630 -#: lxc/network_forward.go:692 lxc/network_forward.go:707 -#: lxc/network_forward.go:772 lxc/network_peer.go:29 lxc/network_peer.go:82 -#: lxc/network_peer.go:159 lxc/network_peer.go:216 lxc/network_peer.go:330 -#: lxc/network_peer.go:385 lxc/network_peer.go:454 lxc/network_peer.go:481 -#: lxc/network_peer.go:606 lxc/network_zone.go:29 lxc/network_zone.go:86 -#: lxc/network_zone.go:157 lxc/network_zone.go:210 lxc/network_zone.go:259 -#: lxc/network_zone.go:340 lxc/network_zone.go:400 lxc/network_zone.go:427 -#: lxc/network_zone.go:546 lxc/network_zone.go:594 lxc/network_zone.go:651 -#: lxc/network_zone.go:721 lxc/network_zone.go:771 lxc/network_zone.go:819 -#: lxc/network_zone.go:899 lxc/network_zone.go:955 lxc/network_zone.go:982 -#: 
lxc/network_zone.go:1100 lxc/network_zone.go:1149 lxc/network_zone.go:1164 -#: lxc/network_zone.go:1210 lxc/operation.go:24 lxc/operation.go:56 -#: lxc/operation.go:105 lxc/operation.go:185 lxc/profile.go:29 -#: lxc/profile.go:104 lxc/profile.go:167 lxc/profile.go:249 lxc/profile.go:310 -#: lxc/profile.go:364 lxc/profile.go:414 lxc/profile.go:540 lxc/profile.go:589 -#: lxc/profile.go:650 lxc/profile.go:726 lxc/profile.go:776 lxc/profile.go:835 -#: lxc/profile.go:889 lxc/project.go:30 lxc/project.go:94 lxc/project.go:159 -#: lxc/project.go:222 lxc/project.go:344 lxc/project.go:394 lxc/project.go:495 -#: lxc/project.go:550 lxc/project.go:610 lxc/project.go:639 lxc/project.go:692 -#: lxc/project.go:751 lxc/publish.go:32 lxc/query.go:34 lxc/remote.go:34 -#: lxc/remote.go:89 lxc/remote.go:625 lxc/remote.go:661 lxc/remote.go:749 -#: lxc/remote.go:821 lxc/remote.go:875 lxc/remote.go:913 lxc/rename.go:21 -#: lxc/restore.go:24 lxc/snapshot.go:28 lxc/storage.go:34 lxc/storage.go:93 -#: lxc/storage.go:167 lxc/storage.go:217 lxc/storage.go:339 lxc/storage.go:399 -#: lxc/storage.go:555 lxc/storage.go:632 lxc/storage.go:706 lxc/storage.go:790 -#: lxc/storage_volume.go:42 lxc/storage_volume.go:164 lxc/storage_volume.go:239 -#: lxc/storage_volume.go:330 lxc/storage_volume.go:525 -#: lxc/storage_volume.go:604 lxc/storage_volume.go:679 -#: lxc/storage_volume.go:761 lxc/storage_volume.go:842 -#: lxc/storage_volume.go:1046 lxc/storage_volume.go:1134 -#: lxc/storage_volume.go:1270 lxc/storage_volume.go:1354 -#: lxc/storage_volume.go:1560 lxc/storage_volume.go:1593 -#: lxc/storage_volume.go:1706 lxc/storage_volume.go:1794 -#: lxc/storage_volume.go:1893 lxc/storage_volume.go:1927 -#: lxc/storage_volume.go:2024 lxc/storage_volume.go:2091 -#: lxc/storage_volume.go:2235 lxc/version.go:22 lxc/warning.go:30 -#: lxc/warning.go:72 lxc/warning.go:263 lxc/warning.go:304 lxc/warning.go:358 +#: lxc/action.go:32 lxc/action.go:53 lxc/action.go:76 lxc/action.go:99 +#: lxc/alias.go:23 lxc/alias.go:60 
lxc/alias.go:110 lxc/alias.go:159 +#: lxc/alias.go:214 lxc/cluster.go:29 lxc/cluster.go:118 lxc/cluster.go:202 +#: lxc/cluster.go:253 lxc/cluster.go:314 lxc/cluster.go:386 lxc/cluster.go:418 +#: lxc/cluster.go:468 lxc/cluster.go:551 lxc/cluster.go:636 lxc/cluster.go:751 +#: lxc/cluster.go:827 lxc/cluster.go:929 lxc/cluster.go:1008 +#: lxc/cluster.go:1114 lxc/cluster.go:1133 lxc/cluster_group.go:30 +#: lxc/cluster_group.go:84 lxc/cluster_group.go:157 lxc/cluster_group.go:214 +#: lxc/cluster_group.go:266 lxc/cluster_group.go:382 lxc/cluster_group.go:456 +#: lxc/cluster_group.go:529 lxc/cluster_group.go:577 lxc/cluster_group.go:631 +#: lxc/cluster_role.go:23 lxc/cluster_role.go:50 lxc/cluster_role.go:106 +#: lxc/config.go:32 lxc/config.go:95 lxc/config.go:380 lxc/config.go:513 +#: lxc/config.go:730 lxc/config.go:854 lxc/config_device.go:24 +#: lxc/config_device.go:78 lxc/config_device.go:208 lxc/config_device.go:285 +#: lxc/config_device.go:356 lxc/config_device.go:450 lxc/config_device.go:548 +#: lxc/config_device.go:555 lxc/config_device.go:668 lxc/config_device.go:741 +#: lxc/config_metadata.go:27 lxc/config_metadata.go:55 +#: lxc/config_metadata.go:180 lxc/config_template.go:27 +#: lxc/config_template.go:67 lxc/config_template.go:110 +#: lxc/config_template.go:152 lxc/config_template.go:240 +#: lxc/config_template.go:300 lxc/config_trust.go:34 lxc/config_trust.go:87 +#: lxc/config_trust.go:236 lxc/config_trust.go:350 lxc/config_trust.go:432 +#: lxc/config_trust.go:534 lxc/config_trust.go:580 lxc/config_trust.go:651 +#: lxc/console.go:36 lxc/copy.go:40 lxc/delete.go:31 lxc/exec.go:41 +#: lxc/export.go:32 lxc/file.go:78 lxc/file.go:118 lxc/file.go:167 +#: lxc/file.go:237 lxc/file.go:459 lxc/file.go:967 lxc/image.go:37 +#: lxc/image.go:144 lxc/image.go:309 lxc/image.go:360 lxc/image.go:487 +#: lxc/image.go:648 lxc/image.go:881 lxc/image.go:1016 lxc/image.go:1335 +#: lxc/image.go:1415 lxc/image.go:1474 lxc/image.go:1526 lxc/image.go:1582 +#: lxc/image_alias.go:24 
lxc/image_alias.go:60 lxc/image_alias.go:107 +#: lxc/image_alias.go:152 lxc/image_alias.go:255 lxc/import.go:28 +#: lxc/info.go:33 lxc/init.go:40 lxc/launch.go:24 lxc/list.go:49 lxc/main.go:79 +#: lxc/manpage.go:22 lxc/monitor.go:33 lxc/move.go:36 lxc/network.go:32 +#: lxc/network.go:127 lxc/network.go:212 lxc/network.go:285 lxc/network.go:364 +#: lxc/network.go:414 lxc/network.go:499 lxc/network.go:584 lxc/network.go:712 +#: lxc/network.go:781 lxc/network.go:904 lxc/network.go:997 lxc/network.go:1068 +#: lxc/network.go:1120 lxc/network.go:1208 lxc/network.go:1272 +#: lxc/network_acl.go:29 lxc/network_acl.go:94 lxc/network_acl.go:165 +#: lxc/network_acl.go:218 lxc/network_acl.go:266 lxc/network_acl.go:327 +#: lxc/network_acl.go:412 lxc/network_acl.go:492 lxc/network_acl.go:522 +#: lxc/network_acl.go:653 lxc/network_acl.go:702 lxc/network_acl.go:751 +#: lxc/network_acl.go:766 lxc/network_acl.go:887 lxc/network_forward.go:29 +#: lxc/network_forward.go:86 lxc/network_forward.go:167 +#: lxc/network_forward.go:231 lxc/network_forward.go:329 +#: lxc/network_forward.go:398 lxc/network_forward.go:496 +#: lxc/network_forward.go:526 lxc/network_forward.go:668 +#: lxc/network_forward.go:730 lxc/network_forward.go:745 +#: lxc/network_forward.go:810 lxc/network_peer.go:28 lxc/network_peer.go:81 +#: lxc/network_peer.go:158 lxc/network_peer.go:215 lxc/network_peer.go:331 +#: lxc/network_peer.go:399 lxc/network_peer.go:488 lxc/network_peer.go:518 +#: lxc/network_peer.go:643 lxc/network_zone.go:28 lxc/network_zone.go:85 +#: lxc/network_zone.go:156 lxc/network_zone.go:211 lxc/network_zone.go:271 +#: lxc/network_zone.go:354 lxc/network_zone.go:434 lxc/network_zone.go:465 +#: lxc/network_zone.go:584 lxc/network_zone.go:632 lxc/network_zone.go:689 +#: lxc/network_zone.go:759 lxc/network_zone.go:811 lxc/network_zone.go:870 +#: lxc/network_zone.go:952 lxc/network_zone.go:1028 lxc/network_zone.go:1058 +#: lxc/network_zone.go:1176 lxc/network_zone.go:1225 lxc/network_zone.go:1240 +#: 
lxc/network_zone.go:1286 lxc/operation.go:23 lxc/operation.go:55 +#: lxc/operation.go:104 lxc/operation.go:184 lxc/profile.go:29 +#: lxc/profile.go:104 lxc/profile.go:167 lxc/profile.go:250 lxc/profile.go:320 +#: lxc/profile.go:374 lxc/profile.go:424 lxc/profile.go:552 lxc/profile.go:613 +#: lxc/profile.go:674 lxc/profile.go:750 lxc/profile.go:802 lxc/profile.go:878 +#: lxc/profile.go:934 lxc/project.go:29 lxc/project.go:93 lxc/project.go:158 +#: lxc/project.go:221 lxc/project.go:348 lxc/project.go:409 lxc/project.go:510 +#: lxc/project.go:567 lxc/project.go:646 lxc/project.go:677 lxc/project.go:730 +#: lxc/project.go:789 lxc/publish.go:32 lxc/query.go:34 lxc/remote.go:33 +#: lxc/remote.go:88 lxc/remote.go:624 lxc/remote.go:660 lxc/remote.go:748 +#: lxc/remote.go:819 lxc/remote.go:873 lxc/remote.go:911 lxc/rename.go:21 +#: lxc/restore.go:24 lxc/snapshot.go:28 lxc/storage.go:33 lxc/storage.go:92 +#: lxc/storage.go:166 lxc/storage.go:216 lxc/storage.go:340 lxc/storage.go:410 +#: lxc/storage.go:582 lxc/storage.go:661 lxc/storage.go:753 lxc/storage.go:839 +#: lxc/storage_volume.go:43 lxc/storage_volume.go:165 lxc/storage_volume.go:240 +#: lxc/storage_volume.go:331 lxc/storage_volume.go:530 +#: lxc/storage_volume.go:609 lxc/storage_volume.go:684 +#: lxc/storage_volume.go:766 lxc/storage_volume.go:847 +#: lxc/storage_volume.go:1056 lxc/storage_volume.go:1171 +#: lxc/storage_volume.go:1314 lxc/storage_volume.go:1398 +#: lxc/storage_volume.go:1604 lxc/storage_volume.go:1637 +#: lxc/storage_volume.go:1752 lxc/storage_volume.go:1896 +#: lxc/storage_volume.go:2005 lxc/storage_volume.go:2051 +#: lxc/storage_volume.go:2148 lxc/storage_volume.go:2215 +#: lxc/storage_volume.go:2367 lxc/version.go:22 lxc/warning.go:29 +#: lxc/warning.go:71 lxc/warning.go:262 lxc/warning.go:303 lxc/warning.go:357 msgid "Description" msgstr "" -#: lxc/storage_volume.go:1215 +#: lxc/storage_volume.go:1259 #, c-format msgid "Description: %s" msgstr "" -#: lxc/storage_volume.go:335 
lxc/storage_volume.go:1565 +#: lxc/storage_volume.go:336 lxc/storage_volume.go:1609 msgid "Destination cluster member name" msgstr "" -#: lxc/network.go:409 lxc/network.go:410 +#: lxc/network.go:413 lxc/network.go:414 msgid "Detach network interfaces from instances" msgstr "" -#: lxc/network.go:494 lxc/network.go:495 +#: lxc/network.go:498 lxc/network.go:499 msgid "Detach network interfaces from profiles" msgstr "" -#: lxc/storage_volume.go:678 lxc/storage_volume.go:679 +#: lxc/storage_volume.go:683 lxc/storage_volume.go:684 msgid "Detach storage volumes from instances" msgstr "" -#: lxc/storage_volume.go:760 lxc/storage_volume.go:761 +#: lxc/storage_volume.go:765 lxc/storage_volume.go:766 msgid "Detach storage volumes from profiles" msgstr "" -#: lxc/config_device.go:181 +#: lxc/config_device.go:185 #, c-format msgid "Device %s added to %s" msgstr "" -#: lxc/config_device.go:422 +#: lxc/config_device.go:426 #, c-format msgid "Device %s overridden for %s" msgstr "" -#: lxc/config_device.go:525 +#: lxc/config_device.go:529 #, c-format msgid "Device %s removed from %s" msgstr "" -#: lxc/utils.go:52 lxc/utils.go:76 +#: lxc/utils.go:53 lxc/utils.go:77 #, c-format msgid "Device already exists: %s" msgstr "" -#: lxc/config_device.go:243 lxc/config_device.go:257 lxc/config_device.go:483 -#: lxc/config_device.go:504 lxc/config_device.go:598 lxc/config_device.go:621 +#: lxc/config_device.go:247 lxc/config_device.go:261 lxc/config_device.go:487 +#: lxc/config_device.go:508 lxc/config_device.go:602 lxc/config_device.go:625 msgid "Device doesn't exist" msgstr "" -#: lxc/config_device.go:624 +#: lxc/config_device.go:628 msgid "" "Device from profile(s) cannot be modified for individual instance. Override " "device or modify profile instead" msgstr "" -#: lxc/config_device.go:507 +#: lxc/config_device.go:511 msgid "" "Device from profile(s) cannot be removed from individual instance. 
Override " "device or modify profile instead" msgstr "" -#: lxc/config_device.go:260 +#: lxc/config_device.go:264 msgid "Device from profile(s) cannot be retrieved for individual instance" msgstr "" -#: lxc/info.go:267 lxc/info.go:291 +#: lxc/info.go:266 lxc/info.go:290 #, c-format msgid "Device: %s" msgstr "" -#: lxc/init.go:431 +#: lxc/init.go:405 msgid "Didn't get any affected image, instance or snapshot from server" msgstr "" -#: lxc/image.go:664 +#: lxc/image.go:663 msgid "Directory import is not available on this platform" msgstr "" @@ -1525,7 +1555,7 @@ msgstr "" msgid "Directory to run the command in (default /root)" msgstr "" -#: lxc/file.go:976 +#: lxc/file.go:975 msgid "Disable authentication when using SSH SFTP listener" msgstr "" @@ -1537,28 +1567,28 @@ msgstr "" msgid "Disable stdin (reads from /dev/null)" msgstr "" -#: lxc/info.go:426 +#: lxc/info.go:425 #, c-format msgid "Disk %d:" msgstr "" -#: lxc/info.go:511 +#: lxc/info.go:510 msgid "Disk usage:" msgstr "" -#: lxc/info.go:421 +#: lxc/info.go:420 msgid "Disk:" msgstr "" -#: lxc/info.go:424 +#: lxc/info.go:423 msgid "Disks:" msgstr "" -#: lxc/list.go:136 +#: lxc/list.go:135 msgid "Display instances from all projects" msgstr "" -#: lxc/cluster.go:435 +#: lxc/cluster.go:473 msgid "Don't require user confirmation for using --force" msgstr "" @@ -1566,54 +1596,54 @@ msgstr "" msgid "Don't show progress information" msgstr "" -#: lxc/network.go:838 +#: lxc/network.go:855 msgid "Down delay" msgstr "" -#: lxc/info.go:106 lxc/info.go:192 +#: lxc/info.go:105 lxc/info.go:191 #, c-format msgid "Driver: %v (%v)" msgstr "" -#: lxc/network_zone.go:705 +#: lxc/network_zone.go:743 msgid "ENTRIES" msgstr "" -#: lxc/list.go:867 +#: lxc/list.go:866 msgid "EPHEMERAL" msgstr "" -#: lxc/cluster.go:870 lxc/config_trust.go:510 +#: lxc/cluster.go:913 lxc/config_trust.go:516 msgid "EXPIRES AT" msgstr "" -#: lxc/config_trust.go:407 +#: lxc/config_trust.go:413 msgid "EXPIRY DATE" msgstr "" -#: lxc/file.go:70 +#: 
lxc/file.go:69 msgid "" "Early server side processing of file transfer requests cannot be canceled " "(interrupt two more times to force)" msgstr "" -#: lxc/cluster_group.go:254 lxc/cluster_group.go:255 +#: lxc/cluster_group.go:265 lxc/cluster_group.go:266 msgid "Edit a cluster group" msgstr "" -#: lxc/cluster.go:597 lxc/cluster.go:598 +#: lxc/cluster.go:635 lxc/cluster.go:636 msgid "Edit cluster member configurations as YAML" msgstr "" -#: lxc/file.go:167 lxc/file.go:168 +#: lxc/file.go:166 lxc/file.go:167 msgid "Edit files in instances" msgstr "" -#: lxc/image.go:360 lxc/image.go:361 +#: lxc/image.go:359 lxc/image.go:360 msgid "Edit image properties" msgstr "" -#: lxc/config_template.go:152 lxc/config_template.go:153 +#: lxc/config_template.go:151 lxc/config_template.go:152 msgid "Edit instance file templates" msgstr "" @@ -1621,65 +1651,65 @@ msgstr "" msgid "Edit instance metadata files" msgstr "" -#: lxc/config.go:91 lxc/config.go:92 +#: lxc/config.go:94 lxc/config.go:95 msgid "Edit instance or server configurations as YAML" msgstr "" -#: lxc/network_acl.go:483 lxc/network_acl.go:484 +#: lxc/network_acl.go:521 lxc/network_acl.go:522 msgid "Edit network ACL configurations as YAML" msgstr "" -#: lxc/network.go:579 lxc/network.go:580 +#: lxc/network.go:583 lxc/network.go:584 msgid "Edit network configurations as YAML" msgstr "" -#: lxc/network_forward.go:487 lxc/network_forward.go:488 +#: lxc/network_forward.go:525 lxc/network_forward.go:526 msgid "Edit network forward configurations as YAML" msgstr "" -#: lxc/network_peer.go:480 lxc/network_peer.go:481 +#: lxc/network_peer.go:517 lxc/network_peer.go:518 msgid "Edit network peer configurations as YAML" msgstr "" -#: lxc/network_zone.go:426 lxc/network_zone.go:427 +#: lxc/network_zone.go:464 lxc/network_zone.go:465 msgid "Edit network zone configurations as YAML" msgstr "" -#: lxc/network_zone.go:981 lxc/network_zone.go:982 +#: lxc/network_zone.go:1057 lxc/network_zone.go:1058 msgid "Edit network zone record 
configurations as YAML" msgstr "" -#: lxc/profile.go:413 lxc/profile.go:414 +#: lxc/profile.go:423 lxc/profile.go:424 msgid "Edit profile configurations as YAML" msgstr "" -#: lxc/project.go:221 lxc/project.go:222 +#: lxc/project.go:220 lxc/project.go:221 msgid "Edit project configurations as YAML" msgstr "" -#: lxc/storage.go:216 lxc/storage.go:217 +#: lxc/storage.go:215 lxc/storage.go:216 msgid "Edit storage pool configurations as YAML" msgstr "" -#: lxc/storage_volume.go:841 lxc/storage_volume.go:842 +#: lxc/storage_volume.go:846 lxc/storage_volume.go:847 msgid "Edit storage volume configurations as YAML" msgstr "" -#: lxc/config_trust.go:229 lxc/config_trust.go:230 +#: lxc/config_trust.go:235 lxc/config_trust.go:236 msgid "Edit trust configurations as YAML" msgstr "" -#: lxc/image.go:1068 lxc/list.go:615 lxc/storage_volume.go:1487 -#: lxc/warning.go:236 +#: lxc/image.go:1067 lxc/list.go:614 lxc/storage_volume.go:1531 +#: lxc/warning.go:235 #, c-format msgid "Empty column entry (redundant, leading or trailing command) in '%s'" msgstr "" -#: lxc/cluster.go:512 +#: lxc/cluster.go:550 msgid "Enable clustering on a single non-clustered LXD server" msgstr "" -#: lxc/cluster.go:513 +#: lxc/cluster.go:551 msgid "" "Enable clustering on a single non-clustered LXD server\n" "\n" @@ -1694,7 +1724,7 @@ msgid "" " for the address if not yet set." msgstr "" -#: lxc/network_zone.go:1166 +#: lxc/network_zone.go:1242 msgid "Entry TTL" msgstr "" @@ -1702,20 +1732,57 @@ msgstr "" msgid "Environment variable to set (e.g. 
HOME=/home/foo)" msgstr "" -#: lxc/copy.go:56 lxc/init.go:52 +#: lxc/copy.go:55 lxc/init.go:51 msgid "Ephemeral instance" msgstr "" -#: lxc/config_template.go:206 +#: lxc/utils_properties.go:180 +#, c-format +msgid "Error creating decoder: %v" +msgstr "" + +#: lxc/utils_properties.go:185 +#, c-format +msgid "Error decoding data: %v" +msgstr "" + +#: lxc/publish.go:233 +#, c-format +msgid "Error retrieving aliases: %w" +msgstr "" + +#: lxc/cluster.go:361 lxc/config.go:620 lxc/config.go:652 lxc/network.go:1186 +#: lxc/network_acl.go:467 lxc/network_forward.go:469 lxc/network_peer.go:463 +#: lxc/network_zone.go:409 lxc/network_zone.go:1003 lxc/profile.go:856 +#: lxc/project.go:621 lxc/storage.go:723 lxc/storage_volume.go:1829 +#: lxc/storage_volume.go:1867 +#, c-format +msgid "Error setting properties: %v" +msgstr "" + +#: lxc/config.go:614 lxc/config.go:646 +#, c-format +msgid "Error unsetting properties: %v" +msgstr "" + +#: lxc/cluster.go:355 lxc/network.go:1180 lxc/network_acl.go:461 +#: lxc/network_forward.go:463 lxc/network_peer.go:457 lxc/network_zone.go:403 +#: lxc/network_zone.go:997 lxc/profile.go:850 lxc/project.go:615 +#: lxc/storage.go:717 lxc/storage_volume.go:1823 lxc/storage_volume.go:1861 +#, c-format +msgid "Error unsetting property: %v" +msgstr "" + +#: lxc/config_template.go:205 #, c-format msgid "Error updating template file: %s" msgstr "" -#: lxc/cluster.go:1070 lxc/cluster.go:1071 +#: lxc/cluster.go:1113 lxc/cluster.go:1114 msgid "Evacuate cluster member" msgstr "" -#: lxc/cluster.go:1147 +#: lxc/cluster.go:1190 #, c-format msgid "Evacuating cluster member: %s" msgstr "" @@ -1743,32 +1810,32 @@ msgid "" "AND stdout are terminals (stderr is ignored)." 
msgstr "" -#: lxc/info.go:631 lxc/info.go:682 lxc/storage_volume.go:1271 -#: lxc/storage_volume.go:1321 +#: lxc/info.go:630 lxc/info.go:681 lxc/storage_volume.go:1315 +#: lxc/storage_volume.go:1365 msgid "Expires at" msgstr "" -#: lxc/image.go:956 +#: lxc/image.go:955 #, c-format msgid "Expires: %s" msgstr "" -#: lxc/image.go:958 +#: lxc/image.go:957 msgid "Expires: never" msgstr "" -#: lxc/image.go:487 +#: lxc/image.go:486 msgid "Export and download images" msgstr "" -#: lxc/image.go:488 +#: lxc/image.go:487 msgid "" "Export and download images\n" "\n" "The output target is optional and defaults to the working directory." msgstr "" -#: lxc/storage_volume.go:2090 lxc/storage_volume.go:2091 +#: lxc/storage_volume.go:2214 lxc/storage_volume.go:2215 msgid "Export custom storage volume" msgstr "" @@ -1780,78 +1847,78 @@ msgstr "" msgid "Export instances as backup tarballs." msgstr "" -#: lxc/storage_volume.go:2094 +#: lxc/storage_volume.go:2218 msgid "Export the volume without its snapshots" msgstr "" -#: lxc/export.go:144 lxc/storage_volume.go:2203 +#: lxc/export.go:152 lxc/storage_volume.go:2335 #, c-format msgid "Exporting the backup: %s" msgstr "" -#: lxc/image.go:556 +#: lxc/image.go:555 #, c-format msgid "Exporting the image: %s" msgstr "" -#: lxc/cluster.go:184 +#: lxc/cluster.go:183 msgid "FAILURE DOMAIN" msgstr "" -#: lxc/config_template.go:284 +#: lxc/config_template.go:283 msgid "FILENAME" msgstr "" -#: lxc/config_trust.go:405 lxc/image.go:1053 lxc/image.go:1054 -#: lxc/image_alias.go:236 +#: lxc/config_trust.go:411 lxc/image.go:1052 lxc/image.go:1053 +#: lxc/image_alias.go:235 msgid "FINGERPRINT" msgstr "" -#: lxc/warning.go:211 +#: lxc/warning.go:210 msgid "FIRST SEEN" msgstr "" -#: lxc/file.go:1219 +#: lxc/file.go:1218 #, c-format msgid "Failed SSH handshake with client %q: %v" msgstr "" -#: lxc/file.go:1242 +#: lxc/file.go:1241 #, c-format msgid "Failed accepting channel client %q: %v" msgstr "" -#: lxc/utils.go:227 +#: lxc/utils.go:267 #, c-format msgid 
"Failed checking instance exists \"%s:%s\": %w" msgstr "" -#: lxc/utils.go:219 +#: lxc/utils.go:259 #, c-format msgid "Failed checking instance snapshot exists \"%s:%s\": %w" msgstr "" -#: lxc/file.go:1269 +#: lxc/file.go:1268 #, c-format msgid "Failed connecting to instance SFTP for client %q: %v" msgstr "" -#: lxc/file.go:1054 +#: lxc/file.go:1053 #, c-format msgid "Failed connecting to instance SFTP: %w" msgstr "" -#: lxc/config_trust.go:204 +#: lxc/config_trust.go:210 #, c-format msgid "Failed converting token operation to certificate add token: %w" msgstr "" -#: lxc/file.go:1175 +#: lxc/file.go:1174 #, c-format msgid "Failed generating SSH host key: %w" msgstr "" -#: lxc/network_peer.go:305 +#: lxc/network_peer.go:304 #, c-format msgid "Failed getting peer's status: %w" msgstr "" @@ -1861,7 +1928,7 @@ msgstr "" msgid "Failed loading profile %q for device override: %w" msgstr "" -#: lxc/file.go:1180 +#: lxc/file.go:1179 #, c-format msgid "Failed parsing SSH host key: %w" msgstr "" @@ -1871,94 +1938,94 @@ msgstr "" msgid "Failed starting command: %w" msgstr "" -#: lxc/file.go:1080 +#: lxc/file.go:1079 #, c-format msgid "Failed starting sshfs: %w" msgstr "" -#: lxc/file.go:1207 +#: lxc/file.go:1206 #, c-format msgid "Failed to accept incoming connection: %w" msgstr "" -#: lxc/remote.go:189 +#: lxc/remote.go:188 msgid "Failed to add remote" msgstr "" -#: lxc/remote.go:240 +#: lxc/remote.go:239 #, c-format msgid "Failed to close server cert file %q: %w" msgstr "" -#: lxc/move.go:285 lxc/move.go:361 lxc/move.go:413 +#: lxc/move.go:284 lxc/move.go:360 lxc/move.go:412 #, c-format msgid "Failed to connect to cluster member: %w" msgstr "" -#: lxc/remote.go:230 +#: lxc/remote.go:229 #, c-format msgid "Failed to create %q: %w" msgstr "" -#: lxc/utils.go:145 +#: lxc/utils.go:185 #, c-format msgid "Failed to create alias %s: %w" msgstr "" -#: lxc/remote.go:255 +#: lxc/remote.go:254 #, c-format msgid "Failed to create certificate: %w" msgstr "" -#: lxc/remote.go:262 +#: 
lxc/remote.go:261 #, c-format msgid "Failed to find project: %w" msgstr "" -#: lxc/copy.go:410 +#: lxc/copy.go:414 msgid "Failed to get the new instance name" msgstr "" -#: lxc/file.go:1192 +#: lxc/file.go:1191 #, c-format msgid "Failed to listen for connection: %w" msgstr "" -#: lxc/copy.go:363 +#: lxc/copy.go:358 #, c-format msgid "Failed to refresh target instance '%s': %v" msgstr "" -#: lxc/utils.go:134 +#: lxc/utils.go:174 #, c-format msgid "Failed to remove alias %s: %w" msgstr "" -#: lxc/file.go:802 +#: lxc/file.go:801 #, c-format msgid "Failed to walk path for %s: %s" msgstr "" -#: lxc/remote.go:235 +#: lxc/remote.go:234 #, c-format msgid "Failed to write server cert file %q: %w" msgstr "" -#: lxc/list.go:135 +#: lxc/list.go:134 msgid "Fast mode (same as --columns=nsacPt)" msgstr "" -#: lxc/network.go:918 lxc/network_acl.go:125 lxc/network_zone.go:116 -#: lxc/operation.go:134 +#: lxc/network.go:935 lxc/network_acl.go:124 lxc/network_zone.go:115 +#: lxc/operation.go:133 msgid "Filtering isn't supported yet" msgstr "" -#: lxc/image.go:941 +#: lxc/image.go:940 #, c-format msgid "Fingerprint: %s" msgstr "" -#: lxc/cluster.go:1098 +#: lxc/cluster.go:1141 msgid "Force evacuation without user confirmation" msgstr "" @@ -1966,11 +2033,11 @@ msgstr "" msgid "Force pseudo-terminal allocation" msgstr "" -#: lxc/cluster.go:434 +#: lxc/cluster.go:472 msgid "Force removing a member, even if degraded" msgstr "" -#: lxc/action.go:126 +#: lxc/action.go:136 msgid "Force the instance to stop" msgstr "" @@ -1982,7 +2049,7 @@ msgstr "" msgid "Force using the local unix socket" msgstr "" -#: lxc/cluster.go:442 +#: lxc/cluster.go:480 #, c-format msgid "" "Forcefully removing a server from the cluster should only be done as a last\n" @@ -2006,14 +2073,14 @@ msgid "" "Are you really sure you want to force removing %s? 
(yes/no): " msgstr "" -#: lxc/alias.go:105 lxc/cluster.go:121 lxc/cluster.go:785 -#: lxc/cluster_group.go:370 lxc/config_template.go:243 lxc/config_trust.go:346 -#: lxc/config_trust.go:428 lxc/image.go:1043 lxc/image_alias.go:158 -#: lxc/list.go:134 lxc/network.go:891 lxc/network.go:982 lxc/network_acl.go:98 -#: lxc/network_forward.go:90 lxc/network_peer.go:85 lxc/network_zone.go:89 -#: lxc/network_zone.go:654 lxc/operation.go:107 lxc/profile.go:593 -#: lxc/project.go:396 lxc/project.go:753 lxc/remote.go:665 lxc/storage.go:557 -#: lxc/storage_volume.go:1370 lxc/warning.go:94 +#: lxc/alias.go:112 lxc/cluster.go:120 lxc/cluster.go:828 +#: lxc/cluster_group.go:384 lxc/config_template.go:242 lxc/config_trust.go:352 +#: lxc/config_trust.go:434 lxc/image.go:1042 lxc/image_alias.go:157 +#: lxc/list.go:133 lxc/network.go:908 lxc/network.go:999 lxc/network_acl.go:97 +#: lxc/network_forward.go:89 lxc/network_peer.go:84 lxc/network_zone.go:88 +#: lxc/network_zone.go:692 lxc/operation.go:106 lxc/profile.go:617 +#: lxc/project.go:411 lxc/project.go:791 lxc/remote.go:664 lxc/storage.go:584 +#: lxc/storage_volume.go:1414 lxc/warning.go:93 msgid "Format (csv|json|table|yaml|compact)" msgstr "" @@ -2025,7 +2092,7 @@ msgstr "" msgid "Format (man|md|rest|yaml)" msgstr "" -#: lxc/network.go:850 +#: lxc/network.go:867 msgid "Forward delay" msgstr "" @@ -2034,30 +2101,30 @@ msgstr "" msgid "Found alias %q references an argument outside the given number" msgstr "" -#: lxc/info.go:369 lxc/info.go:380 lxc/info.go:385 lxc/info.go:391 +#: lxc/info.go:368 lxc/info.go:379 lxc/info.go:384 lxc/info.go:390 #, c-format msgid "Free: %v" msgstr "" -#: lxc/info.go:317 lxc/info.go:328 +#: lxc/info.go:316 lxc/info.go:327 #, c-format msgid "Frequency: %vMhz" msgstr "" -#: lxc/info.go:326 +#: lxc/info.go:325 #, c-format msgid "Frequency: %vMhz (min: %vMhz, max: %vMhz)" msgstr "" -#: lxc/remote.go:732 +#: lxc/remote.go:731 msgid "GLOBAL" msgstr "" -#: lxc/info.go:397 +#: lxc/info.go:396 msgid "GPU:" 
msgstr "" -#: lxc/info.go:400 +#: lxc/info.go:399 msgid "GPUs:" msgstr "" @@ -2065,75 +2132,123 @@ msgstr "" msgid "Generate manpages for all commands" msgstr "" -#: lxc/remote.go:158 lxc/remote.go:391 +#: lxc/remote.go:157 lxc/remote.go:390 msgid "Generating a client certificate. This may take a minute..." msgstr "" -#: lxc/project.go:750 lxc/project.go:751 +#: lxc/project.go:788 lxc/project.go:789 msgid "Get a summary of resource allocations" msgstr "" -#: lxc/image.go:1474 lxc/image.go:1475 +#: lxc/image.go:1473 lxc/image.go:1474 msgid "Get image properties" msgstr "" -#: lxc/network.go:763 lxc/network.go:764 +#: lxc/network.go:780 lxc/network.go:781 msgid "Get runtime information on networks" msgstr "" -#: lxc/cluster.go:251 +#: lxc/cluster.go:255 +msgid "Get the key as a cluster property" +msgstr "" + +#: lxc/network_acl.go:268 +msgid "Get the key as a network ACL property" +msgstr "" + +#: lxc/network_forward.go:331 +msgid "Get the key as a network forward property" +msgstr "" + +#: lxc/network_peer.go:334 +msgid "Get the key as a network peer property" +msgstr "" + +#: lxc/network.go:716 +msgid "Get the key as a network property" +msgstr "" + +#: lxc/network_zone.go:214 +msgid "Get the key as a network zone property" +msgstr "" + +#: lxc/network_zone.go:814 +msgid "Get the key as a network zone record property" +msgstr "" + +#: lxc/profile.go:557 +msgid "Get the key as a profile property" +msgstr "" + +#: lxc/project.go:352 +msgid "Get the key as a project property" +msgstr "" + +#: lxc/storage.go:344 +msgid "Get the key as a storage property" +msgstr "" + +#: lxc/storage_volume.go:1071 +msgid "Get the key as a storage volume property" +msgstr "" + +#: lxc/config.go:384 +msgid "Get the key as an instance property" +msgstr "" + +#: lxc/cluster.go:252 msgid "Get values for cluster member configuration keys" msgstr "" -#: lxc/config_device.go:203 lxc/config_device.go:204 +#: lxc/config_device.go:207 lxc/config_device.go:208 msgid "Get values for device 
configuration keys" msgstr "" -#: lxc/config.go:371 lxc/config.go:372 +#: lxc/config.go:379 lxc/config.go:380 msgid "Get values for instance or server configuration keys" msgstr "" -#: lxc/network_acl.go:264 lxc/network_acl.go:265 +#: lxc/network_acl.go:265 lxc/network_acl.go:266 msgid "Get values for network ACL configuration keys" msgstr "" -#: lxc/network.go:705 lxc/network.go:706 +#: lxc/network.go:711 lxc/network.go:712 msgid "Get values for network configuration keys" msgstr "" -#: lxc/network_forward.go:327 lxc/network_forward.go:328 +#: lxc/network_forward.go:328 lxc/network_forward.go:329 msgid "Get values for network forward configuration keys" msgstr "" -#: lxc/network_peer.go:329 lxc/network_peer.go:330 +#: lxc/network_peer.go:330 lxc/network_peer.go:331 msgid "Get values for network peer configuration keys" msgstr "" -#: lxc/network_zone.go:209 lxc/network_zone.go:210 +#: lxc/network_zone.go:210 lxc/network_zone.go:211 msgid "Get values for network zone configuration keys" msgstr "" -#: lxc/network_zone.go:770 lxc/network_zone.go:771 +#: lxc/network_zone.go:810 lxc/network_zone.go:811 msgid "Get values for network zone record configuration keys" msgstr "" -#: lxc/profile.go:539 lxc/profile.go:540 +#: lxc/profile.go:551 lxc/profile.go:552 msgid "Get values for profile configuration keys" msgstr "" -#: lxc/project.go:343 lxc/project.go:344 +#: lxc/project.go:347 lxc/project.go:348 msgid "Get values for project configuration keys" msgstr "" -#: lxc/storage.go:338 lxc/storage.go:339 +#: lxc/storage.go:339 lxc/storage.go:340 msgid "Get values for storage pool configuration keys" msgstr "" -#: lxc/storage_volume.go:1045 lxc/storage_volume.go:1046 +#: lxc/storage_volume.go:1055 lxc/storage_volume.go:1056 msgid "Get values for storage volume configuration keys" msgstr "" -#: lxc/storage_volume.go:389 +#: lxc/storage_volume.go:394 #, c-format msgid "Given target %q does not match source volume location %q" msgstr "" @@ -2142,81 +2257,85 @@ msgstr "" msgid 
"Group ID to run the command as (default 0)" msgstr "" -#: lxc/network.go:1027 +#: lxc/network.go:1044 msgid "HOSTNAME" msgstr "" -#: lxc/info.go:557 +#: lxc/info.go:556 msgid "Host interface" msgstr "" -#: lxc/info.go:368 lxc/info.go:379 +#: lxc/info.go:367 lxc/info.go:378 msgid "Hugepages:\n" msgstr "" -#: lxc/file.go:1292 +#: lxc/file.go:1291 #, c-format msgid "I/O copy from SSH to instance failed: %v" msgstr "" -#: lxc/file.go:1281 +#: lxc/file.go:1280 #, c-format msgid "I/O copy from instance to SSH failed: %v" msgstr "" -#: lxc/file.go:1104 +#: lxc/file.go:1103 #, c-format msgid "I/O copy from instance to sshfs failed: %v" msgstr "" -#: lxc/file.go:1114 +#: lxc/file.go:1113 #, c-format msgid "I/O copy from sshfs to instance failed: %v" msgstr "" -#: lxc/network.go:848 lxc/operation.go:162 +#: lxc/network.go:865 lxc/operation.go:161 msgid "ID" msgstr "" -#: lxc/info.go:111 +#: lxc/info.go:110 #, c-format msgid "ID: %d" msgstr "" -#: lxc/info.go:199 lxc/info.go:266 lxc/info.go:290 +#: lxc/info.go:198 lxc/info.go:265 lxc/info.go:289 #, c-format msgid "ID: %s" msgstr "" -#: lxc/project.go:473 +#: lxc/project.go:488 msgid "IMAGES" msgstr "" -#: lxc/network.go:1029 +#: lxc/network.go:1046 msgid "IP ADDRESS" msgstr "" -#: lxc/info.go:573 +#: lxc/info.go:572 msgid "IP addresses" msgstr "" -#: lxc/network.go:817 +#: lxc/network.go:834 msgid "IP addresses:" msgstr "" -#: lxc/list.go:553 lxc/network.go:958 +#: lxc/list.go:552 lxc/network.go:975 msgid "IPV4" msgstr "" -#: lxc/list.go:554 lxc/network.go:959 +#: lxc/list.go:553 lxc/network.go:976 msgid "IPV6" msgstr "" -#: lxc/config_trust.go:406 +#: lxc/config_trust.go:412 msgid "ISSUE DATE" msgstr "" -#: lxc/snapshot.go:40 lxc/storage_volume.go:1932 +#: lxc/publish.go:41 +msgid "If the image alias already exists, delete and create a new one" +msgstr "" + +#: lxc/snapshot.go:40 lxc/storage_volume.go:2056 msgid "If the snapshot name already exists, delete and create a new one" msgstr "" @@ -2230,23 +2349,23 @@ msgstr "" 
msgid "Ignore any configured auto-expiry for the instance" msgstr "" -#: lxc/storage_volume.go:1931 +#: lxc/storage_volume.go:2055 msgid "Ignore any configured auto-expiry for the storage volume" msgstr "" -#: lxc/copy.go:65 lxc/move.go:68 +#: lxc/copy.go:64 lxc/move.go:67 msgid "Ignore copy errors for volatile files" msgstr "" -#: lxc/action.go:117 +#: lxc/action.go:127 msgid "Ignore the instance state" msgstr "" -#: lxc/image.go:1397 +#: lxc/image.go:1396 msgid "Image already up to date." msgstr "" -#: lxc/image.go:278 +#: lxc/image.go:277 msgid "Image copied successfully!" msgstr "" @@ -2254,70 +2373,70 @@ msgstr "" msgid "Image expiration date (format: rfc3339)" msgstr "" -#: lxc/image.go:632 +#: lxc/image.go:631 msgid "Image exported successfully!" msgstr "" -#: lxc/image.go:333 lxc/image.go:1359 +#: lxc/image.go:332 lxc/image.go:1358 msgid "Image identifier missing" msgstr "" -#: lxc/image.go:401 lxc/image.go:1551 +#: lxc/image.go:400 lxc/image.go:1550 #, c-format msgid "Image identifier missing: %s" msgstr "" -#: lxc/image.go:852 +#: lxc/image.go:851 #, c-format msgid "Image imported with fingerprint: %s" msgstr "" -#: lxc/image.go:1395 +#: lxc/image.go:1394 msgid "Image refreshed successfully!" msgstr "" -#: lxc/action.go:121 lxc/launch.go:42 +#: lxc/action.go:131 lxc/launch.go:41 msgid "Immediately attach to the console" msgstr "" -#: lxc/storage_volume.go:2235 +#: lxc/storage_volume.go:2367 msgid "Import backups of custom volumes including their snapshots." msgstr "" -#: lxc/import.go:29 +#: lxc/import.go:28 msgid "Import backups of instances including their snapshots." msgstr "" -#: lxc/storage_volume.go:2234 +#: lxc/storage_volume.go:2366 msgid "Import custom storage volumes" msgstr "" -#: lxc/image.go:649 +#: lxc/image.go:648 msgid "" "Import image into the image store\n" "\n" "Directory import is only available on Linux and must be performed as root." 
msgstr "" -#: lxc/image.go:648 +#: lxc/image.go:647 msgid "Import images into the image store" msgstr "" -#: lxc/import.go:28 +#: lxc/import.go:27 msgid "Import instance backups" msgstr "" -#: lxc/storage_volume.go:2289 +#: lxc/storage_volume.go:2421 #, c-format msgid "Importing custom volume: %s" msgstr "" -#: lxc/import.go:95 +#: lxc/import.go:94 #, c-format msgid "Importing instance: %s" msgstr "" -#: lxc/info.go:228 +#: lxc/info.go:227 msgid "Infiniband:" msgstr "" @@ -2325,42 +2444,42 @@ msgstr "" msgid "Input data" msgstr "" -#: lxc/info.go:683 +#: lxc/info.go:682 msgid "Instance Only" msgstr "" -#: lxc/file.go:1106 +#: lxc/file.go:1105 msgid "Instance disconnected" msgstr "" -#: lxc/file.go:1283 +#: lxc/file.go:1282 #, c-format msgid "Instance disconnected for client %q" msgstr "" -#: lxc/publish.go:78 +#: lxc/publish.go:79 msgid "Instance name is mandatory" msgstr "" -#: lxc/copy.go:416 lxc/init.go:438 +#: lxc/copy.go:420 lxc/init.go:412 #, c-format msgid "Instance name is: %s" msgstr "" -#: lxc/file.go:1026 +#: lxc/file.go:1025 msgid "Instance path cannot be used in SSH SFTP listener mode" msgstr "" -#: lxc/publish.go:293 +#: lxc/publish.go:346 #, c-format msgid "Instance published with fingerprint: %s" msgstr "" -#: lxc/init.go:55 +#: lxc/init.go:54 msgid "Instance type" msgstr "" -#: lxc/remote.go:343 +#: lxc/remote.go:342 #, c-format msgid "Invalid URL scheme \"%s\" in \"%s\"" msgstr "" @@ -2370,25 +2489,20 @@ msgstr "" msgid "Invalid argument %q" msgstr "" -#: lxc/config_trust.go:385 +#: lxc/config_trust.go:391 msgid "Invalid certificate" msgstr "" -#: lxc/list.go:649 +#: lxc/list.go:648 #, c-format msgid "Invalid config key '%s' in '%s'" msgstr "" -#: lxc/list.go:642 +#: lxc/list.go:641 #, c-format msgid "Invalid config key column format (too many fields): '%s'" msgstr "" -#: lxc/utils/table.go:65 -#, c-format -msgid "Invalid format %q" -msgstr "" - #: lxc/monitor.go:71 #, c-format msgid "Invalid format: %s" @@ -2399,41 +2513,41 @@ msgstr "" msgid 
"Invalid instance name: %s" msgstr "" -#: lxc/file.go:1021 +#: lxc/file.go:1020 #, c-format msgid "Invalid instance path: %q" msgstr "" -#: lxc/utils.go:184 +#: lxc/utils.go:224 #, c-format msgid "Invalid key=value configuration: %s" msgstr "" -#: lxc/list.go:670 +#: lxc/list.go:669 #, c-format msgid "Invalid max width (must -1, 0 or a positive integer) '%s' in '%s'" msgstr "" -#: lxc/list.go:666 +#: lxc/list.go:665 #, c-format msgid "Invalid max width (must be an integer) '%s' in '%s'" msgstr "" -#: lxc/list.go:656 +#: lxc/list.go:655 #, c-format msgid "" "Invalid name in '%s', empty string is only allowed when defining maxWidth" msgstr "" -#: lxc/move.go:135 lxc/storage_volume.go:1644 +#: lxc/move.go:134 lxc/storage_volume.go:1688 msgid "Invalid new snapshot name" msgstr "" -#: lxc/move.go:131 +#: lxc/move.go:130 msgid "Invalid new snapshot name, parent must be the same as source" msgstr "" -#: lxc/storage_volume.go:1640 +#: lxc/storage_volume.go:1684 msgid "Invalid new snapshot name, parent volume must be the same as source" msgstr "" @@ -2441,59 +2555,59 @@ msgstr "" msgid "Invalid number of arguments" msgstr "" -#: lxc/file.go:143 +#: lxc/file.go:142 #, c-format msgid "Invalid path %s" msgstr "" -#: lxc/remote.go:332 +#: lxc/remote.go:331 #, c-format msgid "Invalid protocol: %s" msgstr "" -#: lxc/storage_volume.go:895 lxc/storage_volume.go:1082 -#: lxc/storage_volume.go:1170 lxc/storage_volume.go:1629 -#: lxc/storage_volume.go:1750 lxc/storage_volume.go:1836 +#: lxc/storage_volume.go:903 lxc/storage_volume.go:1104 +#: lxc/storage_volume.go:1216 lxc/storage_volume.go:1673 +#: lxc/storage_volume.go:1806 lxc/storage_volume.go:1946 msgid "Invalid snapshot name" msgstr "" -#: lxc/file.go:306 +#: lxc/file.go:305 #, c-format msgid "Invalid source %s" msgstr "" -#: lxc/file.go:488 +#: lxc/file.go:487 #, c-format msgid "Invalid target %s" msgstr "" -#: lxc/info.go:231 +#: lxc/info.go:230 #, c-format msgid "IsSM: %s (%s)" msgstr "" -#: lxc/image.go:153 +#: 
lxc/image.go:152 msgid "Keep the image up to date after initial copy" msgstr "" -#: lxc/warning.go:212 +#: lxc/warning.go:211 msgid "LAST SEEN" msgstr "" -#: lxc/list.go:563 +#: lxc/list.go:562 msgid "LAST USED AT" msgstr "" -#: lxc/project.go:812 +#: lxc/project.go:850 msgid "LIMIT" msgstr "" -#: lxc/network_forward.go:145 +#: lxc/network_forward.go:144 msgid "LISTEN ADDRESS" msgstr "" -#: lxc/list.go:599 lxc/network.go:1034 lxc/network_forward.go:152 -#: lxc/operation.go:169 lxc/storage_volume.go:1466 lxc/warning.go:221 +#: lxc/list.go:598 lxc/network.go:1051 lxc/network_forward.go:151 +#: lxc/operation.go:168 lxc/storage_volume.go:1510 lxc/warning.go:220 msgid "LOCATION" msgstr "" @@ -2505,119 +2619,119 @@ msgstr "" msgid "LXD automatically uses either spicy or remote-viewer when present." msgstr "" -#: lxc/cluster.go:155 lxc/cluster.go:819 lxc/cluster.go:913 lxc/cluster.go:1004 -#: lxc/cluster_group.go:404 +#: lxc/cluster.go:154 lxc/cluster.go:862 lxc/cluster.go:956 lxc/cluster.go:1047 +#: lxc/cluster_group.go:419 msgid "LXD server isn't part of a cluster" msgstr "" -#: lxc/info.go:492 +#: lxc/info.go:491 #, c-format msgid "Last Used: %s" msgstr "" -#: lxc/image.go:962 +#: lxc/image.go:961 #, c-format msgid "Last used: %s" msgstr "" -#: lxc/image.go:964 +#: lxc/image.go:963 msgid "Last used: never" msgstr "" -#: lxc/info.go:222 +#: lxc/info.go:221 #, c-format msgid "Link detected: %v" msgstr "" -#: lxc/info.go:224 +#: lxc/info.go:223 #, c-format msgid "Link speed: %dMbit/s (%s duplex)" msgstr "" -#: lxc/network.go:979 lxc/network.go:980 +#: lxc/network.go:996 lxc/network.go:997 msgid "List DHCP leases" msgstr "" -#: lxc/alias.go:102 lxc/alias.go:103 +#: lxc/alias.go:109 lxc/alias.go:110 msgid "List aliases" msgstr "" -#: lxc/config_trust.go:425 lxc/config_trust.go:426 +#: lxc/config_trust.go:431 lxc/config_trust.go:432 msgid "List all active certificate add tokens" msgstr "" -#: lxc/cluster.go:783 lxc/cluster.go:784 +#: lxc/cluster.go:826 lxc/cluster.go:827 
msgid "List all active cluster member join tokens" msgstr "" -#: lxc/cluster_group.go:367 lxc/cluster_group.go:368 +#: lxc/cluster_group.go:381 lxc/cluster_group.go:382 msgid "List all the cluster groups" msgstr "" -#: lxc/cluster.go:118 lxc/cluster.go:119 +#: lxc/cluster.go:117 lxc/cluster.go:118 msgid "List all the cluster members" msgstr "" -#: lxc/warning.go:95 +#: lxc/warning.go:94 msgid "List all warnings" msgstr "" -#: lxc/network_acl.go:95 +#: lxc/network_acl.go:94 msgid "List available network ACL" msgstr "" -#: lxc/network_acl.go:94 +#: lxc/network_acl.go:93 msgid "List available network ACLS" msgstr "" -#: lxc/network_forward.go:86 lxc/network_forward.go:87 +#: lxc/network_forward.go:85 lxc/network_forward.go:86 msgid "List available network forwards" msgstr "" -#: lxc/network_peer.go:81 lxc/network_peer.go:82 +#: lxc/network_peer.go:80 lxc/network_peer.go:81 msgid "List available network peers" msgstr "" -#: lxc/network_zone.go:86 +#: lxc/network_zone.go:85 msgid "List available network zone" msgstr "" -#: lxc/network_zone.go:650 lxc/network_zone.go:651 +#: lxc/network_zone.go:688 lxc/network_zone.go:689 msgid "List available network zone records" msgstr "" -#: lxc/network_zone.go:85 +#: lxc/network_zone.go:84 msgid "List available network zoneS" msgstr "" -#: lxc/network.go:886 lxc/network.go:887 +#: lxc/network.go:903 lxc/network.go:904 msgid "List available networks" msgstr "" -#: lxc/storage.go:554 lxc/storage.go:555 +#: lxc/storage.go:581 lxc/storage.go:582 msgid "List available storage pools" msgstr "" -#: lxc/operation.go:104 lxc/operation.go:105 +#: lxc/operation.go:103 lxc/operation.go:104 msgid "List background operations" msgstr "" -#: lxc/image_alias.go:152 +#: lxc/image_alias.go:151 msgid "List image aliases" msgstr "" -#: lxc/image_alias.go:153 +#: lxc/image_alias.go:152 msgid "" "List image aliases\n" "\n" "Filters may be part of the image hash or part of the image alias name.\n" msgstr "" -#: lxc/image.go:1016 +#: lxc/image.go:1015 msgid 
"List images" msgstr "" -#: lxc/image.go:1017 +#: lxc/image.go:1016 msgid "" "List images\n" "\n" @@ -2644,19 +2758,19 @@ msgid "" " t - Type" msgstr "" -#: lxc/config_device.go:280 lxc/config_device.go:281 +#: lxc/config_device.go:284 lxc/config_device.go:285 msgid "List instance devices" msgstr "" -#: lxc/config_template.go:240 lxc/config_template.go:241 +#: lxc/config_template.go:239 lxc/config_template.go:240 msgid "List instance file templates" msgstr "" -#: lxc/list.go:49 +#: lxc/list.go:48 msgid "List instances" msgstr "" -#: lxc/list.go:50 +#: lxc/list.go:49 #, c-format msgid "" "List instances\n" @@ -2739,23 +2853,23 @@ msgid "" " Defaults to -1 (unlimited). Use 0 to limit to the column header size." msgstr "" -#: lxc/config_trust.go:102 +#: lxc/config_trust.go:101 msgid "List of projects to restrict the certificate to" msgstr "" -#: lxc/profile.go:588 lxc/profile.go:589 +#: lxc/profile.go:612 lxc/profile.go:613 msgid "List profiles" msgstr "" -#: lxc/project.go:393 lxc/project.go:394 +#: lxc/project.go:408 lxc/project.go:409 msgid "List projects" msgstr "" -#: lxc/storage_volume.go:1349 +#: lxc/storage_volume.go:1393 msgid "List storage volumes" msgstr "" -#: lxc/storage_volume.go:1354 +#: lxc/storage_volume.go:1398 msgid "" "List storage volumes\n" "\n" @@ -2774,19 +2888,19 @@ msgid "" " U - Current disk usage" msgstr "" -#: lxc/remote.go:660 lxc/remote.go:661 +#: lxc/remote.go:659 lxc/remote.go:660 msgid "List the available remotes" msgstr "" -#: lxc/config_trust.go:343 lxc/config_trust.go:344 +#: lxc/config_trust.go:349 lxc/config_trust.go:350 msgid "List trusted clients" msgstr "" -#: lxc/warning.go:71 +#: lxc/warning.go:70 msgid "List warnings" msgstr "" -#: lxc/warning.go:72 +#: lxc/warning.go:71 msgid "" "List warnings\n" "\n" @@ -2809,11 +2923,11 @@ msgid "" " t - Type" msgstr "" -#: lxc/operation.go:23 lxc/operation.go:24 +#: lxc/operation.go:22 lxc/operation.go:23 msgid "List, show and delete background operations" msgstr "" -#: lxc/info.go:480 
lxc/storage_volume.go:1231 +#: lxc/info.go:479 lxc/storage_volume.go:1275 #, c-format msgid "Location: %s" msgstr "" @@ -2822,75 +2936,75 @@ msgstr "" msgid "Log level filtering can only be used with pretty formatting" msgstr "" -#: lxc/info.go:711 +#: lxc/info.go:710 msgid "Log:" msgstr "" -#: lxc/network.go:860 +#: lxc/network.go:877 msgid "Lower device" msgstr "" -#: lxc/network.go:841 +#: lxc/network.go:858 msgid "Lower devices" msgstr "" -#: lxc/network.go:1028 +#: lxc/network.go:1045 msgid "MAC ADDRESS" msgstr "" -#: lxc/info.go:561 +#: lxc/info.go:560 msgid "MAC address" msgstr "" -#: lxc/network.go:809 +#: lxc/network.go:826 #, c-format msgid "MAC address: %s" msgstr "" -#: lxc/info.go:235 +#: lxc/info.go:234 #, c-format msgid "MAD: %s (%s)" msgstr "" -#: lxc/network.go:957 +#: lxc/network.go:974 msgid "MANAGED" msgstr "" -#: lxc/cluster_group.go:424 +#: lxc/cluster_group.go:439 msgid "MEMBERS" msgstr "" -#: lxc/list.go:564 +#: lxc/list.go:563 msgid "MEMORY USAGE" msgstr "" -#: lxc/list.go:565 +#: lxc/list.go:564 #, c-format msgid "MEMORY USAGE%" msgstr "" -#: lxc/cluster.go:187 +#: lxc/cluster.go:186 msgid "MESSAGE" msgstr "" -#: lxc/network.go:839 +#: lxc/network.go:856 msgid "MII Frequency" msgstr "" -#: lxc/network.go:840 +#: lxc/network.go:857 msgid "MII state" msgstr "" -#: lxc/info.go:565 +#: lxc/info.go:564 msgid "MTU" msgstr "" -#: lxc/network.go:810 +#: lxc/network.go:827 #, c-format msgid "MTU: %d" msgstr "" -#: lxc/image.go:151 lxc/image.go:654 +#: lxc/image.go:150 lxc/image.go:653 msgid "Make image public" msgstr "" @@ -2898,7 +3012,7 @@ msgstr "" msgid "Make the image public" msgstr "" -#: lxc/network.go:32 lxc/network.go:33 +#: lxc/network.go:31 lxc/network.go:32 msgid "Manage and attach instances to networks" msgstr "" @@ -2906,15 +3020,15 @@ msgstr "" msgid "Manage cluster groups" msgstr "" -#: lxc/cluster.go:29 lxc/cluster.go:30 +#: lxc/cluster.go:28 lxc/cluster.go:29 msgid "Manage cluster members" msgstr "" -#: lxc/cluster_role.go:21 
lxc/cluster_role.go:22 +#: lxc/cluster_role.go:22 lxc/cluster_role.go:23 msgid "Manage cluster roles" msgstr "" -#: lxc/alias.go:21 lxc/alias.go:22 +#: lxc/alias.go:22 lxc/alias.go:23 msgid "Manage command aliases" msgstr "" @@ -2922,19 +3036,19 @@ msgstr "" msgid "Manage devices" msgstr "" -#: lxc/file.go:78 lxc/file.go:79 +#: lxc/file.go:77 lxc/file.go:78 msgid "Manage files in instances" msgstr "" -#: lxc/image_alias.go:24 lxc/image_alias.go:25 +#: lxc/image_alias.go:23 lxc/image_alias.go:24 msgid "Manage image aliases" msgstr "" -#: lxc/image.go:37 +#: lxc/image.go:36 msgid "Manage images" msgstr "" -#: lxc/image.go:38 +#: lxc/image.go:37 msgid "" "Manage images\n" "\n" @@ -2953,11 +3067,11 @@ msgid "" "hash or alias name (if one is set)." msgstr "" -#: lxc/config.go:29 lxc/config.go:30 +#: lxc/config.go:31 lxc/config.go:32 msgid "Manage instance and server configuration options" msgstr "" -#: lxc/config_template.go:27 lxc/config_template.go:28 +#: lxc/config_template.go:26 lxc/config_template.go:27 msgid "Manage instance file templates" msgstr "" @@ -2965,35 +3079,35 @@ msgstr "" msgid "Manage instance metadata files" msgstr "" -#: lxc/network_acl.go:712 lxc/network_acl.go:713 +#: lxc/network_acl.go:750 lxc/network_acl.go:751 msgid "Manage network ACL rules" msgstr "" -#: lxc/network_acl.go:29 lxc/network_acl.go:30 +#: lxc/network_acl.go:28 lxc/network_acl.go:29 msgid "Manage network ACLs" msgstr "" -#: lxc/network_forward.go:691 lxc/network_forward.go:692 +#: lxc/network_forward.go:729 lxc/network_forward.go:730 msgid "Manage network forward ports" msgstr "" -#: lxc/network_forward.go:29 lxc/network_forward.go:30 +#: lxc/network_forward.go:28 lxc/network_forward.go:29 msgid "Manage network forwards" msgstr "" -#: lxc/network_peer.go:28 lxc/network_peer.go:29 +#: lxc/network_peer.go:27 lxc/network_peer.go:28 msgid "Manage network peerings" msgstr "" -#: lxc/network_zone.go:1148 lxc/network_zone.go:1149 +#: lxc/network_zone.go:1224 lxc/network_zone.go:1225 
msgid "Manage network zone record entries" msgstr "" -#: lxc/network_zone.go:593 lxc/network_zone.go:594 +#: lxc/network_zone.go:631 lxc/network_zone.go:632 msgid "Manage network zone records" msgstr "" -#: lxc/network_zone.go:28 lxc/network_zone.go:29 +#: lxc/network_zone.go:27 lxc/network_zone.go:28 msgid "Manage network zones" msgstr "" @@ -3001,19 +3115,19 @@ msgstr "" msgid "Manage profiles" msgstr "" -#: lxc/project.go:29 lxc/project.go:30 +#: lxc/project.go:28 lxc/project.go:29 msgid "Manage projects" msgstr "" -#: lxc/storage.go:33 lxc/storage.go:34 +#: lxc/storage.go:32 lxc/storage.go:33 msgid "Manage storage pools and volumes" msgstr "" -#: lxc/storage_volume.go:41 +#: lxc/storage_volume.go:42 msgid "Manage storage volumes" msgstr "" -#: lxc/storage_volume.go:42 +#: lxc/storage_volume.go:43 msgid "" "Manage storage volumes\n" "\n" @@ -3021,74 +3135,74 @@ msgid "" "\"custom\" (user created) volumes." msgstr "" -#: lxc/remote.go:33 lxc/remote.go:34 +#: lxc/remote.go:32 lxc/remote.go:33 msgid "Manage the list of remote servers" msgstr "" -#: lxc/config_trust.go:34 lxc/config_trust.go:35 +#: lxc/config_trust.go:33 lxc/config_trust.go:34 msgid "Manage trusted clients" msgstr "" -#: lxc/warning.go:29 lxc/warning.go:30 +#: lxc/warning.go:28 lxc/warning.go:29 msgid "Manage warnings" msgstr "" -#: lxc/info.go:139 lxc/info.go:248 +#: lxc/info.go:138 lxc/info.go:247 #, c-format msgid "Maximum number of VFs: %d" msgstr "" -#: lxc/info.go:150 +#: lxc/info.go:149 msgid "Mdev profiles:" msgstr "" -#: lxc/cluster_role.go:83 +#: lxc/cluster_role.go:86 #, c-format msgid "Member %q already has role %q" msgstr "" -#: lxc/cluster_role.go:137 +#: lxc/cluster_role.go:142 #, c-format msgid "Member %q does not have role %q" msgstr "" -#: lxc/cluster.go:764 +#: lxc/cluster.go:807 #, c-format msgid "Member %s join token:" msgstr "" -#: lxc/cluster.go:497 +#: lxc/cluster.go:535 #, c-format msgid "Member %s removed" msgstr "" -#: lxc/cluster.go:410 +#: lxc/cluster.go:448 #, c-format 
msgid "Member %s renamed to %s" msgstr "" -#: lxc/info.go:529 +#: lxc/info.go:528 msgid "Memory (current)" msgstr "" -#: lxc/info.go:533 +#: lxc/info.go:532 msgid "Memory (peak)" msgstr "" -#: lxc/info.go:545 +#: lxc/info.go:544 msgid "Memory usage:" msgstr "" -#: lxc/info.go:366 +#: lxc/info.go:365 msgid "Memory:" msgstr "" -#: lxc/move.go:303 lxc/move.go:375 lxc/move.go:427 +#: lxc/move.go:302 lxc/move.go:374 lxc/move.go:426 #, c-format msgid "Migration API failure: %w" msgstr "" -#: lxc/move.go:328 lxc/move.go:380 lxc/move.go:432 +#: lxc/move.go:327 lxc/move.go:379 lxc/move.go:431 #, c-format msgid "Migration operation failure: %w" msgstr "" @@ -3098,130 +3212,131 @@ msgid "" "Minimum level for log messages (only available when using pretty format)" msgstr "" -#: lxc/config_trust.go:262 lxc/config_trust.go:670 +#: lxc/config_trust.go:268 lxc/config_trust.go:676 msgid "Missing certificate fingerprint" msgstr "" -#: lxc/cluster_group.go:174 lxc/cluster_group.go:229 lxc/cluster_group.go:279 -#: lxc/cluster_group.go:581 +#: lxc/cluster_group.go:182 lxc/cluster_group.go:239 lxc/cluster_group.go:291 +#: lxc/cluster_group.go:602 msgid "Missing cluster group name" msgstr "" -#: lxc/cluster.go:631 lxc/cluster.go:1119 lxc/cluster_group.go:110 -#: lxc/cluster_group.go:464 lxc/cluster_role.go:70 lxc/cluster_role.go:124 +#: lxc/cluster.go:669 lxc/cluster.go:1162 lxc/cluster_group.go:116 +#: lxc/cluster_group.go:481 lxc/cluster_group.go:655 lxc/cluster_role.go:73 +#: lxc/cluster_role.go:129 msgid "Missing cluster member name" msgstr "" #: lxc/config_metadata.go:103 lxc/config_metadata.go:204 -#: lxc/config_template.go:92 lxc/config_template.go:135 -#: lxc/config_template.go:177 lxc/config_template.go:266 -#: lxc/config_template.go:325 lxc/profile.go:128 lxc/profile.go:201 -#: lxc/profile.go:674 +#: lxc/config_template.go:91 lxc/config_template.go:134 +#: lxc/config_template.go:176 lxc/config_template.go:265 +#: lxc/config_template.go:324 lxc/profile.go:128 lxc/profile.go:201 
+#: lxc/profile.go:698 msgid "Missing instance name" msgstr "" -#: lxc/network_forward.go:196 lxc/network_forward.go:260 -#: lxc/network_forward.go:355 lxc/network_forward.go:415 -#: lxc/network_forward.go:539 lxc/network_forward.go:658 -#: lxc/network_forward.go:735 lxc/network_forward.go:801 +#: lxc/network_forward.go:195 lxc/network_forward.go:259 +#: lxc/network_forward.go:358 lxc/network_forward.go:431 +#: lxc/network_forward.go:577 lxc/network_forward.go:696 +#: lxc/network_forward.go:773 lxc/network_forward.go:839 msgid "Missing listen address" msgstr "" -#: lxc/config_device.go:119 lxc/config_device.go:228 lxc/config_device.go:310 -#: lxc/config_device.go:376 lxc/config_device.go:470 lxc/config_device.go:579 -#: lxc/config_device.go:688 +#: lxc/config_device.go:119 lxc/config_device.go:232 lxc/config_device.go:314 +#: lxc/config_device.go:380 lxc/config_device.go:474 lxc/config_device.go:583 +#: lxc/config_device.go:692 msgid "Missing name" msgstr "" -#: lxc/network_acl.go:188 lxc/network_acl.go:240 lxc/network_acl.go:287 -#: lxc/network_acl.go:337 lxc/network_acl.go:424 lxc/network_acl.go:534 -#: lxc/network_acl.go:637 lxc/network_acl.go:686 lxc/network_acl.go:806 -#: lxc/network_acl.go:873 +#: lxc/network_acl.go:187 lxc/network_acl.go:239 lxc/network_acl.go:290 +#: lxc/network_acl.go:350 lxc/network_acl.go:440 lxc/network_acl.go:572 +#: lxc/network_acl.go:675 lxc/network_acl.go:724 lxc/network_acl.go:844 +#: lxc/network_acl.go:911 msgid "Missing network ACL name" msgstr "" -#: lxc/network.go:152 lxc/network.go:237 lxc/network.go:384 lxc/network.go:434 -#: lxc/network.go:519 lxc/network.go:624 lxc/network.go:732 lxc/network.go:790 -#: lxc/network.go:1005 lxc/network.go:1075 lxc/network.go:1130 -#: lxc/network.go:1197 lxc/network_forward.go:116 lxc/network_forward.go:192 -#: lxc/network_forward.go:256 lxc/network_forward.go:351 -#: lxc/network_forward.go:411 lxc/network_forward.go:535 -#: lxc/network_forward.go:654 lxc/network_forward.go:731 -#: 
lxc/network_forward.go:797 lxc/network_peer.go:111 lxc/network_peer.go:181 -#: lxc/network_peer.go:238 lxc/network_peer.go:353 lxc/network_peer.go:411 -#: lxc/network_peer.go:519 lxc/network_peer.go:628 +#: lxc/network.go:151 lxc/network.go:236 lxc/network.go:388 lxc/network.go:438 +#: lxc/network.go:523 lxc/network.go:628 lxc/network.go:739 lxc/network.go:807 +#: lxc/network.go:1022 lxc/network.go:1092 lxc/network.go:1150 +#: lxc/network.go:1234 lxc/network_forward.go:115 lxc/network_forward.go:191 +#: lxc/network_forward.go:255 lxc/network_forward.go:354 +#: lxc/network_forward.go:427 lxc/network_forward.go:573 +#: lxc/network_forward.go:692 lxc/network_forward.go:769 +#: lxc/network_forward.go:835 lxc/network_peer.go:110 lxc/network_peer.go:180 +#: lxc/network_peer.go:237 lxc/network_peer.go:355 lxc/network_peer.go:426 +#: lxc/network_peer.go:556 lxc/network_peer.go:665 msgid "Missing network name" msgstr "" -#: lxc/network_zone.go:179 lxc/network_zone.go:232 lxc/network_zone.go:282 -#: lxc/network_zone.go:367 lxc/network_zone.go:465 lxc/network_zone.go:568 -#: lxc/network_zone.go:674 lxc/network_zone.go:742 lxc/network_zone.go:841 -#: lxc/network_zone.go:922 lxc/network_zone.go:1121 lxc/network_zone.go:1186 -#: lxc/network_zone.go:1231 +#: lxc/network_zone.go:178 lxc/network_zone.go:234 lxc/network_zone.go:294 +#: lxc/network_zone.go:382 lxc/network_zone.go:503 lxc/network_zone.go:606 +#: lxc/network_zone.go:712 lxc/network_zone.go:780 lxc/network_zone.go:892 +#: lxc/network_zone.go:976 lxc/network_zone.go:1197 lxc/network_zone.go:1262 +#: lxc/network_zone.go:1307 msgid "Missing network zone name" msgstr "" -#: lxc/network_zone.go:792 lxc/network_zone.go:1019 +#: lxc/network_zone.go:833 lxc/network_zone.go:1095 msgid "Missing network zone record name" msgstr "" -#: lxc/network_peer.go:185 lxc/network_peer.go:242 lxc/network_peer.go:357 -#: lxc/network_peer.go:415 lxc/network_peer.go:523 lxc/network_peer.go:632 +#: lxc/network_peer.go:184 lxc/network_peer.go:241 
lxc/network_peer.go:359 +#: lxc/network_peer.go:430 lxc/network_peer.go:560 lxc/network_peer.go:669 msgid "Missing peer name" msgstr "" -#: lxc/storage.go:191 lxc/storage.go:261 lxc/storage.go:364 lxc/storage.go:425 -#: lxc/storage.go:661 lxc/storage.go:738 lxc/storage_volume.go:188 -#: lxc/storage_volume.go:263 lxc/storage_volume.go:551 -#: lxc/storage_volume.go:628 lxc/storage_volume.go:703 -#: lxc/storage_volume.go:785 lxc/storage_volume.go:884 -#: lxc/storage_volume.go:1071 lxc/storage_volume.go:1393 -#: lxc/storage_volume.go:1618 lxc/storage_volume.go:1733 -#: lxc/storage_volume.go:1825 lxc/storage_volume.go:1953 -#: lxc/storage_volume.go:2048 +#: lxc/storage.go:190 lxc/storage.go:260 lxc/storage.go:366 lxc/storage.go:436 +#: lxc/storage.go:691 lxc/storage.go:785 lxc/storage_volume.go:189 +#: lxc/storage_volume.go:264 lxc/storage_volume.go:556 +#: lxc/storage_volume.go:633 lxc/storage_volume.go:708 +#: lxc/storage_volume.go:790 lxc/storage_volume.go:892 +#: lxc/storage_volume.go:1093 lxc/storage_volume.go:1437 +#: lxc/storage_volume.go:1662 lxc/storage_volume.go:1789 +#: lxc/storage_volume.go:1935 lxc/storage_volume.go:2077 +#: lxc/storage_volume.go:2172 msgid "Missing pool name" msgstr "" -#: lxc/profile.go:334 lxc/profile.go:388 lxc/profile.go:462 lxc/profile.go:564 -#: lxc/profile.go:750 lxc/profile.go:803 lxc/profile.go:859 +#: lxc/profile.go:344 lxc/profile.go:398 lxc/profile.go:472 lxc/profile.go:577 +#: lxc/profile.go:774 lxc/profile.go:829 lxc/profile.go:902 msgid "Missing profile name" msgstr "" -#: lxc/project.go:119 lxc/project.go:188 lxc/project.go:266 lxc/project.go:368 -#: lxc/project.go:519 lxc/project.go:577 lxc/project.go:663 lxc/project.go:776 +#: lxc/project.go:118 lxc/project.go:187 lxc/project.go:268 lxc/project.go:372 +#: lxc/project.go:534 lxc/project.go:594 lxc/project.go:701 lxc/project.go:814 msgid "Missing project name" msgstr "" -#: lxc/profile.go:275 +#: lxc/profile.go:277 msgid "Missing source profile name" msgstr "" -#: 
lxc/storage_volume.go:360 +#: lxc/storage_volume.go:361 msgid "Missing source volume name" msgstr "" -#: lxc/storage_volume.go:1159 +#: lxc/storage_volume.go:1205 msgid "Missing storage pool name" msgstr "" -#: lxc/file.go:587 +#: lxc/file.go:586 msgid "Missing target directory" msgstr "" -#: lxc/network_peer.go:246 +#: lxc/network_peer.go:245 msgid "Missing target network" msgstr "" -#: lxc/network.go:835 +#: lxc/network.go:852 msgid "Mode" msgstr "" -#: lxc/info.go:270 +#: lxc/info.go:269 #, c-format msgid "Model: %s" msgstr "" -#: lxc/info.go:130 +#: lxc/info.go:129 #, c-format msgid "Model: %v" msgstr "" @@ -3237,24 +3352,24 @@ msgid "" "By default the monitor will listen to all message types." msgstr "" -#: lxc/network.go:454 lxc/network.go:539 lxc/storage_volume.go:723 -#: lxc/storage_volume.go:804 +#: lxc/network.go:458 lxc/network.go:543 lxc/storage_volume.go:728 +#: lxc/storage_volume.go:809 msgid "More than one device matches, specify the device name" msgstr "" -#: lxc/file.go:281 +#: lxc/file.go:280 msgid "More than one file to download, but target is not a directory" msgstr "" -#: lxc/file.go:967 lxc/file.go:968 +#: lxc/file.go:966 lxc/file.go:967 msgid "Mount files from instances" msgstr "" -#: lxc/move.go:36 +#: lxc/move.go:35 msgid "Move instances within or in between LXD servers" msgstr "" -#: lxc/move.go:37 +#: lxc/move.go:36 msgid "" "Move instances within or in between LXD servers\n" "\n" @@ -3270,197 +3385,197 @@ msgid "" "versions.\n" msgstr "" -#: lxc/storage_volume.go:1559 lxc/storage_volume.go:1560 +#: lxc/storage_volume.go:1603 lxc/storage_volume.go:1604 msgid "Move storage volumes between pools" msgstr "" -#: lxc/move.go:62 +#: lxc/move.go:61 msgid "Move the instance without its snapshots" msgstr "" -#: lxc/storage_volume.go:1566 +#: lxc/storage_volume.go:1610 msgid "Move to a project different from the source" msgstr "" -#: lxc/storage_volume.go:432 +#: lxc/storage_volume.go:437 #, c-format msgid "Moving the storage volume: %s" msgstr "" 
-#: lxc/network_forward.go:845 +#: lxc/network_forward.go:883 msgid "Multiple ports match. Use --force to remove them all" msgstr "" -#: lxc/network_acl.go:928 +#: lxc/network_acl.go:966 msgid "Multiple rules match. Use --force to remove them all" msgstr "" -#: lxc/image.go:666 +#: lxc/image.go:665 msgid "Must run as root to import from directory" msgstr "" -#: lxc/action.go:220 +#: lxc/action.go:234 msgid "Must supply instance name for: " msgstr "" -#: lxc/cluster.go:180 lxc/cluster.go:868 lxc/cluster_group.go:422 -#: lxc/config_trust.go:403 lxc/config_trust.go:508 lxc/list.go:566 -#: lxc/network.go:955 lxc/network_acl.go:148 lxc/network_peer.go:140 -#: lxc/network_zone.go:139 lxc/network_zone.go:703 lxc/profile.go:633 -#: lxc/project.go:472 lxc/remote.go:726 lxc/storage.go:607 -#: lxc/storage_volume.go:1458 +#: lxc/cluster.go:179 lxc/cluster.go:911 lxc/cluster_group.go:437 +#: lxc/config_trust.go:409 lxc/config_trust.go:514 lxc/list.go:565 +#: lxc/network.go:972 lxc/network_acl.go:147 lxc/network_peer.go:139 +#: lxc/network_zone.go:138 lxc/network_zone.go:741 lxc/profile.go:657 +#: lxc/project.go:487 lxc/remote.go:725 lxc/storage.go:634 +#: lxc/storage_volume.go:1502 msgid "NAME" msgstr "" -#: lxc/project.go:476 +#: lxc/project.go:491 msgid "NETWORKS" msgstr "" -#: lxc/info.go:409 +#: lxc/info.go:408 msgid "NIC:" msgstr "" -#: lxc/info.go:412 +#: lxc/info.go:411 msgid "NICs:" msgstr "" -#: lxc/network.go:932 lxc/operation.go:146 lxc/project.go:440 -#: lxc/project.go:445 lxc/project.go:450 lxc/project.go:455 lxc/remote.go:682 -#: lxc/remote.go:687 lxc/remote.go:692 +#: lxc/network.go:949 lxc/operation.go:145 lxc/project.go:455 +#: lxc/project.go:460 lxc/project.go:465 lxc/project.go:470 lxc/remote.go:681 +#: lxc/remote.go:686 lxc/remote.go:691 msgid "NO" msgstr "" -#: lxc/info.go:91 lxc/info.go:177 lxc/info.go:264 +#: lxc/info.go:90 lxc/info.go:176 lxc/info.go:263 #, c-format msgid "NUMA node: %v" msgstr "" -#: lxc/info.go:375 +#: lxc/info.go:374 msgid "NUMA 
nodes:\n" msgstr "" -#: lxc/info.go:127 +#: lxc/info.go:126 msgid "NVIDIA information:" msgstr "" -#: lxc/info.go:132 +#: lxc/info.go:131 #, c-format msgid "NVRM Version: %v" msgstr "" -#: lxc/info.go:629 lxc/info.go:680 lxc/storage_volume.go:1269 -#: lxc/storage_volume.go:1319 +#: lxc/info.go:628 lxc/info.go:679 lxc/storage_volume.go:1313 +#: lxc/storage_volume.go:1363 msgid "Name" msgstr "" -#: lxc/remote.go:140 +#: lxc/remote.go:139 msgid "Name of the project to use for this remote:" msgstr "" -#: lxc/info.go:463 lxc/network.go:808 lxc/storage_volume.go:1213 +#: lxc/info.go:462 lxc/network.go:825 lxc/storage_volume.go:1257 #, c-format msgid "Name: %s" msgstr "" -#: lxc/info.go:304 +#: lxc/info.go:303 #, c-format msgid "Name: %v" msgstr "" -#: lxc/network.go:342 +#: lxc/network.go:346 #, c-format msgid "Network %s created" msgstr "" -#: lxc/network.go:394 +#: lxc/network.go:398 #, c-format msgid "Network %s deleted" msgstr "" -#: lxc/network.go:340 +#: lxc/network.go:344 #, c-format msgid "Network %s pending on member %s" msgstr "" -#: lxc/network.go:1085 +#: lxc/network.go:1102 #, c-format msgid "Network %s renamed to %s" msgstr "" -#: lxc/network_acl.go:381 +#: lxc/network_acl.go:394 #, c-format msgid "Network ACL %s created" msgstr "" -#: lxc/network_acl.go:696 +#: lxc/network_acl.go:734 #, c-format msgid "Network ACL %s deleted" msgstr "" -#: lxc/network_acl.go:647 +#: lxc/network_acl.go:685 #, c-format msgid "Network ACL %s renamed to %s" msgstr "" -#: lxc/network_zone.go:324 +#: lxc/network_zone.go:336 #, c-format msgid "Network Zone %s created" msgstr "" -#: lxc/network_zone.go:578 +#: lxc/network_zone.go:616 #, c-format msgid "Network Zone %s deleted" msgstr "" -#: lxc/network_forward.go:312 +#: lxc/network_forward.go:311 #, c-format msgid "Network forward %s created" msgstr "" -#: lxc/network_forward.go:675 +#: lxc/network_forward.go:713 #, c-format msgid "Network forward %s deleted" msgstr "" -#: lxc/init.go:53 +#: lxc/init.go:52 msgid "Network name" 
msgstr "" -#: lxc/network_peer.go:309 +#: lxc/network_peer.go:308 #, c-format msgid "Network peer %s created" msgstr "" -#: lxc/network_peer.go:644 +#: lxc/network_peer.go:681 #, c-format msgid "Network peer %s deleted" msgstr "" -#: lxc/network_peer.go:313 +#: lxc/network_peer.go:312 #, c-format msgid "Network peer %s is in unexpected state %q" msgstr "" -#: lxc/network_peer.go:311 +#: lxc/network_peer.go:310 #, c-format msgid "" "Network peer %s pending (please complete mutual peering on peer network)" msgstr "" -#: lxc/network.go:289 +#: lxc/network.go:293 msgid "Network type" msgstr "" -#: lxc/info.go:586 lxc/network.go:825 +#: lxc/info.go:585 lxc/network.go:842 msgid "Network usage:" msgstr "" -#: lxc/network_zone.go:883 +#: lxc/network_zone.go:934 #, c-format msgid "Network zone record %s created" msgstr "" -#: lxc/network_zone.go:1131 +#: lxc/network_zone.go:1207 #, c-format msgid "Network zone record %s deleted" msgstr "" @@ -3469,100 +3584,100 @@ msgstr "" msgid "New alias to define at target" msgstr "" -#: lxc/image.go:154 lxc/image.go:655 +#: lxc/image.go:153 lxc/image.go:654 msgid "New aliases to add to the image" msgstr "" -#: lxc/copy.go:54 lxc/init.go:51 lxc/move.go:59 +#: lxc/copy.go:53 lxc/init.go:50 lxc/move.go:58 msgid "New key/value to apply to a specific device" msgstr "" -#: lxc/config_trust.go:631 +#: lxc/config_trust.go:637 #, c-format msgid "No certificate add token for member %s on remote: %s" msgstr "" -#: lxc/cluster.go:951 +#: lxc/cluster.go:994 #, c-format msgid "No cluster join token for member %s on remote: %s" msgstr "" -#: lxc/network.go:463 lxc/network.go:548 +#: lxc/network.go:467 lxc/network.go:552 msgid "No device found for this network" msgstr "" -#: lxc/storage_volume.go:732 lxc/storage_volume.go:813 +#: lxc/storage_volume.go:737 lxc/storage_volume.go:818 msgid "No device found for this storage volume" msgstr "" -#: lxc/network_forward.go:856 +#: lxc/network_forward.go:894 msgid "No matching port(s) found" msgstr "" -#: 
lxc/network_acl.go:939 +#: lxc/network_acl.go:977 msgid "No matching rule(s) found" msgstr "" -#: lxc/storage_volume.go:374 +#: lxc/storage_volume.go:375 msgid "No storage pool for source volume specified" msgstr "" -#: lxc/storage_volume.go:416 +#: lxc/storage_volume.go:421 msgid "No storage pool for target volume specified" msgstr "" -#: lxc/config_device.go:130 lxc/config_device.go:400 +#: lxc/config_device.go:130 lxc/config_device.go:404 #, c-format msgid "No value found in %q" msgstr "" -#: lxc/info.go:377 +#: lxc/info.go:376 #, c-format msgid "Node %d:\n" msgstr "" -#: lxc/storage_volume.go:1656 +#: lxc/storage_volume.go:1700 msgid "Not a snapshot name" msgstr "" -#: lxc/network.go:867 +#: lxc/network.go:884 msgid "OVN:" msgstr "" -#: lxc/storage_volume.go:208 lxc/storage_volume.go:283 +#: lxc/storage_volume.go:209 lxc/storage_volume.go:284 msgid "Only \"custom\" volumes can be attached to instances" msgstr "" -#: lxc/storage_volume.go:2133 +#: lxc/storage_volume.go:2257 msgid "Only \"custom\" volumes can be exported" msgstr "" -#: lxc/storage_volume.go:1966 +#: lxc/storage_volume.go:2090 msgid "Only \"custom\" volumes can be snapshotted" msgstr "" -#: lxc/remote.go:326 +#: lxc/remote.go:325 msgid "Only https URLs are supported for simplestreams" msgstr "" -#: lxc/image.go:743 +#: lxc/image.go:742 msgid "Only https:// is supported for remote image import" msgstr "" -#: lxc/storage_volume.go:1177 +#: lxc/storage_volume.go:1223 msgid "Only instance or custom volumes are supported" msgstr "" -#: lxc/network.go:650 lxc/network.go:1145 +#: lxc/network.go:654 lxc/network.go:1165 msgid "Only managed networks can be modified" msgstr "" -#: lxc/operation.go:86 +#: lxc/operation.go:85 #, c-format msgid "Operation %s deleted" msgstr "" -#: lxc/info.go:684 lxc/storage_volume.go:1323 +#: lxc/info.go:683 lxc/storage_volume.go:1367 msgid "Optimized Storage" msgstr "" @@ -3574,57 +3689,57 @@ msgstr "" msgid "Override the terminal mode (auto, interactive or non-interactive)" 
msgstr "" -#: lxc/info.go:102 lxc/info.go:188 +#: lxc/info.go:101 lxc/info.go:187 #, c-format msgid "PCI address: %v" msgstr "" -#: lxc/network_peer.go:142 +#: lxc/network_peer.go:141 msgid "PEER" msgstr "" -#: lxc/list.go:568 +#: lxc/list.go:567 msgid "PID" msgstr "" -#: lxc/info.go:484 +#: lxc/info.go:483 #, c-format msgid "PID: %d" msgstr "" -#: lxc/network_forward.go:148 +#: lxc/network_forward.go:147 msgid "PORTS" msgstr "" -#: lxc/list.go:567 +#: lxc/list.go:566 msgid "PROCESSES" msgstr "" -#: lxc/list.go:569 lxc/project.go:474 +#: lxc/list.go:568 lxc/project.go:489 msgid "PROFILES" msgstr "" -#: lxc/list.go:560 lxc/storage_volume.go:1477 lxc/warning.go:213 +#: lxc/list.go:559 lxc/storage_volume.go:1521 lxc/warning.go:212 msgid "PROJECT" msgstr "" -#: lxc/remote.go:728 +#: lxc/remote.go:727 msgid "PROTOCOL" msgstr "" -#: lxc/image.go:1055 lxc/remote.go:730 +#: lxc/image.go:1054 lxc/remote.go:729 msgid "PUBLIC" msgstr "" -#: lxc/info.go:570 lxc/network.go:828 +#: lxc/info.go:569 lxc/network.go:845 msgid "Packets received" msgstr "" -#: lxc/info.go:571 lxc/network.go:829 +#: lxc/info.go:570 lxc/network.go:846 msgid "Packets sent" msgstr "" -#: lxc/info.go:287 +#: lxc/info.go:286 msgid "Partitions:" msgstr "" @@ -3633,50 +3748,50 @@ msgstr "" msgid "Password for %s: " msgstr "" -#: lxc/action.go:49 lxc/action.go:50 +#: lxc/action.go:52 lxc/action.go:53 msgid "Pause instances" msgstr "" -#: lxc/copy.go:64 +#: lxc/copy.go:63 msgid "Perform an incremental copy" msgstr "" -#: lxc/remote.go:181 +#: lxc/remote.go:180 msgid "Please provide an alternate server address (empty to abort):" msgstr "" -#: lxc/config_trust.go:149 +#: lxc/config_trust.go:155 msgid "Please provide client name: " msgstr "" -#: lxc/cluster.go:738 +#: lxc/cluster.go:781 msgid "Please provide cluster member name: " msgstr "" -#: lxc/remote.go:456 +#: lxc/remote.go:455 msgid "Please type 'y', 'n' or the fingerprint:" msgstr "" -#: lxc/info.go:214 +#: lxc/info.go:213 #, c-format msgid "Port type: %s" 
msgstr "" -#: lxc/info.go:196 +#: lxc/info.go:195 msgid "Ports:" msgstr "" -#: lxc/file.go:1084 +#: lxc/file.go:1083 msgid "Press ctrl+c to finish" msgstr "" -#: lxc/cluster.go:680 lxc/cluster_group.go:328 lxc/config.go:264 -#: lxc/config.go:339 lxc/config_metadata.go:148 lxc/config_template.go:207 -#: lxc/config_trust.go:309 lxc/image.go:455 lxc/network.go:675 -#: lxc/network_acl.go:583 lxc/network_forward.go:598 lxc/network_peer.go:574 -#: lxc/network_zone.go:514 lxc/network_zone.go:1068 lxc/profile.go:509 -#: lxc/project.go:313 lxc/storage.go:308 lxc/storage_volume.go:982 -#: lxc/storage_volume.go:1014 +#: lxc/cluster.go:718 lxc/cluster_group.go:340 lxc/config.go:269 +#: lxc/config.go:344 lxc/config_metadata.go:148 lxc/config_template.go:206 +#: lxc/config_trust.go:315 lxc/image.go:454 lxc/network.go:679 +#: lxc/network_acl.go:621 lxc/network_forward.go:636 lxc/network_peer.go:611 +#: lxc/network_zone.go:552 lxc/network_zone.go:1144 lxc/profile.go:519 +#: lxc/project.go:315 lxc/storage.go:307 lxc/storage_volume.go:990 +#: lxc/storage_volume.go:1022 msgid "Press enter to open the editor again or ctrl+c to abort change" msgstr "" @@ -3696,7 +3811,7 @@ msgstr "" msgid "Print version number" msgstr "" -#: lxc/info.go:498 +#: lxc/info.go:497 #, c-format msgid "Processes: %d" msgstr "" @@ -3706,7 +3821,7 @@ msgstr "" msgid "Processing aliases failed: %s" msgstr "" -#: lxc/info.go:98 lxc/info.go:184 +#: lxc/info.go:97 lxc/info.go:183 #, c-format msgid "Product: %v (%v)" msgstr "" @@ -3716,36 +3831,36 @@ msgstr "" msgid "Profile %s added to %s" msgstr "" -#: lxc/profile.go:347 +#: lxc/profile.go:357 #, c-format msgid "Profile %s created" msgstr "" -#: lxc/profile.go:398 +#: lxc/profile.go:408 #, c-format msgid "Profile %s deleted" msgstr "" -#: lxc/profile.go:684 +#: lxc/profile.go:708 #, c-format msgid "Profile %s isn't currently applied to %s" msgstr "" -#: lxc/profile.go:709 +#: lxc/profile.go:733 #, c-format msgid "Profile %s removed from %s" msgstr "" -#: 
lxc/profile.go:760 +#: lxc/profile.go:784 #, c-format msgid "Profile %s renamed to %s" msgstr "" -#: lxc/copy.go:55 lxc/init.go:50 +#: lxc/copy.go:54 lxc/init.go:49 msgid "Profile to apply to the new instance" msgstr "" -#: lxc/move.go:60 +#: lxc/move.go:59 msgid "Profile to apply to the target instance" msgstr "" @@ -3754,46 +3869,132 @@ msgstr "" msgid "Profiles %s applied to %s" msgstr "" -#: lxc/image.go:994 +#: lxc/image.go:993 msgid "Profiles:" msgstr "" -#: lxc/image.go:992 +#: lxc/image.go:991 msgid "Profiles: " msgstr "" -#: lxc/project.go:142 +#: lxc/project.go:141 #, c-format msgid "Project %s created" msgstr "" -#: lxc/project.go:198 +#: lxc/project.go:197 #, c-format msgid "Project %s deleted" msgstr "" -#: lxc/project.go:534 +#: lxc/project.go:549 #, c-format msgid "Project %s renamed to %s" msgstr "" -#: lxc/remote.go:105 +#: lxc/remote.go:104 msgid "Project to use for the remote" msgstr "" -#: lxc/image.go:967 +#: lxc/image.go:966 msgid "Properties:" msgstr "" -#: lxc/image.go:1510 +#: lxc/image.go:1509 msgid "Property not found" msgstr "" -#: lxc/remote.go:103 +#: lxc/storage_volume.go:1173 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, container and virtual-machine.\n" +"\n" +"lxc storage volume info default data\n" +" Returns state information for a custom volume \"data\" in pool " +"\"default\".\n" +"\n" +"lxc storage volume info default virtual-machine/data\n" +" Returns state information for a virtual machine \"data\" in pool " +"\"default\"." 
+msgstr "" + +#: lxc/storage_volume.go:1058 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, image, container and virtual-machine.\n" +"\n" +"Add the name of the snapshot if type is one of custom, container or virtual-" +"machine.\n" +"\n" +"lxc storage volume get default data size\n" +" Returns the size of a custom volume \"data\" in pool \"default\".\n" +"\n" +"lxc storage volume get default virtual-machine/data snapshots.expiry\n" +" Returns the snapshot expiration period for a virtual machine \"data\" in " +"pool \"default\"." +msgstr "" + +#: lxc/storage_volume.go:1898 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, image, container and virtual-machine.\n" +"\n" +"Add the name of the snapshot if type is one of custom, container or virtual-" +"machine.\n" +"\n" +"lxc storage volume show default data\n" +" Will show the properties of a custom volume called \"data\" in the " +"\"default\" pool.\n" +"\n" +"lxc storage volume show default container/data\n" +" Will show the properties of the filesystem for a container called " +"\"data\" in the \"default\" pool.\n" +"\n" +"lxc storage volume show default virtual-machine/data/snap0\n" +" Will show the properties of snapshot \"snap0\" for a virtual machine " +"called \"data\" in the \"default\" pool." +msgstr "" + +#: lxc/storage_volume.go:849 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, image, container and virtual-machine.\n" +"\n" +"lxc storage volume edit [:] [/] < volume.yaml\n" +" Update a storage volume using the content of pool.yaml." 
+msgstr "" + +#: lxc/storage_volume.go:1757 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, image, container and virtual-machine.\n" +"\n" +"lxc storage volume set default data size=1GiB\n" +" Sets the size of a custom volume \"data\" in pool \"default\" to 1 GiB.\n" +"\n" +"lxc storage volume set default virtual-machine/data snapshots.expiry=7d\n" +" Sets the snapshot expiration period for a virtual machine \"data\" in " +"pool \"default\" to seven days." +msgstr "" + +#: lxc/storage_volume.go:2007 +msgid "" +"Provide the type of the storage volume if it is not custom.\n" +"Supported types are custom, image, container and virtual-machine.\n" +"\n" +"lxc storage volume unset default data size\n" +" Remotes the size/quota of a custom volume \"data\" in pool \"default\".\n" +"\n" +"lxc storage volume unset default virtual-machine/data snapshots.expiry\n" +" Removes the snapshot expiration period for a virtual machine \"data\" in " +"pool \"default\"." 
+msgstr "" + +#: lxc/remote.go:102 msgid "Public image server" msgstr "" -#: lxc/image.go:945 +#: lxc/image.go:944 #, c-format msgid "Public: %s" msgstr "" @@ -3802,25 +4003,25 @@ msgstr "" msgid "Publish instances as images" msgstr "" -#: lxc/publish.go:237 +#: lxc/publish.go:252 #, c-format msgid "Publishing instance: %s" msgstr "" -#: lxc/file.go:237 lxc/file.go:238 +#: lxc/file.go:236 lxc/file.go:237 msgid "Pull files from instances" msgstr "" -#: lxc/file.go:409 lxc/file.go:749 +#: lxc/file.go:408 lxc/file.go:748 #, c-format msgid "Pulling %s from %s: %%s" msgstr "" -#: lxc/file.go:459 lxc/file.go:460 +#: lxc/file.go:458 lxc/file.go:459 msgid "Push files into instances" msgstr "" -#: lxc/file.go:683 lxc/file.go:849 +#: lxc/file.go:682 lxc/file.go:848 #, c-format msgid "Pushing %s to %s: %%s" msgstr "" @@ -3829,84 +4030,80 @@ msgstr "" msgid "Query path must start with /" msgstr "" -#: lxc/image.go:493 lxc/image.go:885 lxc/image.go:1419 +#: lxc/image.go:492 lxc/image.go:884 lxc/image.go:1418 msgid "Query virtual machine images" msgstr "" -#: lxc/project.go:811 +#: lxc/project.go:849 msgid "RESOURCE" msgstr "" -#: lxc/cluster.go:182 +#: lxc/cluster.go:181 msgid "ROLES" msgstr "" -#: lxc/info.go:283 lxc/info.go:292 +#: lxc/info.go:282 lxc/info.go:291 #, c-format msgid "Read-Only: %v" msgstr "" -#: lxc/file.go:245 lxc/file.go:466 +#: lxc/file.go:244 lxc/file.go:465 msgid "Recursively transfer files" msgstr "" -#: lxc/storage_volume.go:338 +#: lxc/storage_volume.go:339 msgid "Refresh and update the existing storage volume copies" msgstr "" -#: lxc/image.go:1335 lxc/image.go:1336 +#: lxc/image.go:1334 lxc/image.go:1335 msgid "Refresh images" msgstr "" -#: lxc/copy.go:376 +#: lxc/copy.go:380 #, c-format msgid "Refreshing instance: %s" msgstr "" -#: lxc/image.go:1364 +#: lxc/image.go:1363 #, c-format msgid "Refreshing the image: %s" msgstr "" -#: lxc/remote.go:778 +#: lxc/remote.go:777 #, c-format msgid "Remote %s already exists" msgstr "" -#: lxc/project.go:718 
lxc/remote.go:769 lxc/remote.go:841 lxc/remote.go:895 -#: lxc/remote.go:933 +#: lxc/project.go:756 lxc/remote.go:768 lxc/remote.go:839 lxc/remote.go:893 +#: lxc/remote.go:931 #, c-format msgid "Remote %s doesn't exist" msgstr "" -#: lxc/remote.go:295 +#: lxc/remote.go:294 #, c-format msgid "Remote %s exists as <%s>" msgstr "" -#: lxc/remote.go:849 +#: lxc/remote.go:847 #, c-format msgid "Remote %s is global and cannot be removed" msgstr "" -#: lxc/remote.go:773 lxc/remote.go:845 lxc/remote.go:937 +#: lxc/remote.go:772 lxc/remote.go:843 lxc/remote.go:935 #, c-format msgid "Remote %s is static and cannot be modified" msgstr "" -#: lxc/remote.go:100 +#: lxc/remote.go:99 msgid "Remote admin password" msgstr "" -#: lxc/remote.go:289 +#: lxc/remote.go:288 msgid "Remote names may not contain colons" msgstr "" -#: lxc/utils/cancel.go:59 -msgid "Remote operation canceled by user" -msgstr "" - -#: lxc/info.go:284 +#: lxc/info.go:283 #, c-format msgid "Removable: %v" msgstr "" @@ -3916,76 +4113,76 @@ msgstr "" msgid "Remove %s (yes/no): " msgstr "" -#: lxc/cluster_group.go:440 +#: lxc/cluster_group.go:456 msgid "Remove a cluster member from a cluster group" msgstr "" -#: lxc/cluster.go:429 lxc/cluster.go:430 +#: lxc/cluster.go:467 lxc/cluster.go:468 msgid "Remove a member from the cluster" msgstr "" -#: lxc/network_zone.go:1209 +#: lxc/network_zone.go:1285 msgid "Remove a network zone record entry" msgstr "" -#: lxc/alias.go:198 lxc/alias.go:199 +#: lxc/alias.go:213 lxc/alias.go:214 msgid "Remove aliases" msgstr "" -#: lxc/network_forward.go:773 +#: lxc/network_forward.go:811 msgid "Remove all ports that match" msgstr "" -#: lxc/network_acl.go:850 +#: lxc/network_acl.go:888 msgid "Remove all rules that match" msgstr "" -#: lxc/network_zone.go:1210 +#: lxc/network_zone.go:1286 msgid "Remove entries from a network zone record" msgstr "" -#: lxc/config_device.go:445 lxc/config_device.go:446 +#: lxc/config_device.go:449 lxc/config_device.go:450 msgid "Remove instance devices" 
msgstr "" -#: lxc/cluster_group.go:439 +#: lxc/cluster_group.go:455 msgid "Remove member from group" msgstr "" -#: lxc/network_forward.go:771 lxc/network_forward.go:772 +#: lxc/network_forward.go:809 lxc/network_forward.go:810 msgid "Remove ports from a forward" msgstr "" -#: lxc/profile.go:649 lxc/profile.go:650 +#: lxc/profile.go:673 lxc/profile.go:674 msgid "Remove profiles from instances" msgstr "" -#: lxc/remote.go:820 lxc/remote.go:821 +#: lxc/remote.go:818 lxc/remote.go:819 msgid "Remove remotes" msgstr "" -#: lxc/cluster_role.go:101 lxc/cluster_role.go:102 +#: lxc/cluster_role.go:105 lxc/cluster_role.go:106 msgid "Remove roles from a cluster member" msgstr "" -#: lxc/network_acl.go:848 lxc/network_acl.go:849 +#: lxc/network_acl.go:886 lxc/network_acl.go:887 msgid "Remove rules from an ACL" msgstr "" -#: lxc/config_trust.go:527 lxc/config_trust.go:528 +#: lxc/config_trust.go:533 lxc/config_trust.go:534 msgid "Remove trusted client" msgstr "" -#: lxc/cluster_group.go:510 lxc/cluster_group.go:511 +#: lxc/cluster_group.go:528 lxc/cluster_group.go:529 msgid "Rename a cluster group" msgstr "" -#: lxc/cluster.go:379 lxc/cluster.go:380 +#: lxc/cluster.go:417 lxc/cluster.go:418 msgid "Rename a cluster member" msgstr "" -#: lxc/alias.go:147 lxc/alias.go:148 lxc/image_alias.go:255 -#: lxc/image_alias.go:256 +#: lxc/alias.go:158 lxc/alias.go:159 lxc/image_alias.go:254 +#: lxc/image_alias.go:255 msgid "Rename aliases" msgstr "" @@ -3993,45 +4190,45 @@ msgstr "" msgid "Rename instances and snapshots" msgstr "" -#: lxc/network_acl.go:614 lxc/network_acl.go:615 +#: lxc/network_acl.go:652 lxc/network_acl.go:653 msgid "Rename network ACLs" msgstr "" -#: lxc/network.go:1050 lxc/network.go:1051 +#: lxc/network.go:1067 lxc/network.go:1068 msgid "Rename networks" msgstr "" -#: lxc/profile.go:725 lxc/profile.go:726 +#: lxc/profile.go:749 lxc/profile.go:750 msgid "Rename profiles" msgstr "" -#: lxc/project.go:494 lxc/project.go:495 +#: lxc/project.go:509 lxc/project.go:510 msgid 
"Rename projects" msgstr "" -#: lxc/remote.go:748 lxc/remote.go:749 +#: lxc/remote.go:747 lxc/remote.go:748 msgid "Rename remotes" msgstr "" -#: lxc/storage_volume.go:1593 +#: lxc/storage_volume.go:1637 msgid "Rename storage volumes" msgstr "" -#: lxc/storage_volume.go:1592 +#: lxc/storage_volume.go:1636 msgid "Rename storage volumes and storage volume snapshots" msgstr "" -#: lxc/storage_volume.go:1669 lxc/storage_volume.go:1689 +#: lxc/storage_volume.go:1713 lxc/storage_volume.go:1733 #, c-format msgid "Renamed storage volume from \"%s\" to \"%s\"" msgstr "" -#: lxc/info.go:122 +#: lxc/info.go:121 #, c-format msgid "Render: %s (%s)" msgstr "" -#: lxc/cluster.go:712 lxc/cluster.go:713 +#: lxc/cluster.go:750 lxc/cluster.go:751 msgid "Request a join token for adding a cluster member" msgstr "" @@ -4039,22 +4236,22 @@ msgstr "" msgid "Require user confirmation" msgstr "" -#: lxc/info.go:496 +#: lxc/info.go:495 msgid "Resources:" msgstr "" -#: lxc/action.go:69 +#: lxc/action.go:75 msgid "Restart instances" msgstr "" -#: lxc/action.go:70 +#: lxc/action.go:76 msgid "" "Restart instances\n" "\n" "The opposite of \"lxc pause\" is \"lxc start\"." msgstr "" -#: lxc/cluster.go:1089 lxc/cluster.go:1090 +#: lxc/cluster.go:1132 lxc/cluster.go:1133 msgid "Restore cluster member" msgstr "" @@ -4069,16 +4266,16 @@ msgid "" "If --stateful is passed, then the running state will be restored too." 
msgstr "" -#: lxc/storage_volume.go:2023 lxc/storage_volume.go:2024 +#: lxc/storage_volume.go:2147 lxc/storage_volume.go:2148 msgid "Restore storage volume snapshots" msgstr "" -#: lxc/cluster.go:1145 +#: lxc/cluster.go:1188 #, c-format msgid "Restoring cluster member: %s" msgstr "" -#: lxc/config_trust.go:101 +#: lxc/config_trust.go:100 msgid "Restrict the certificate to one or more projects" msgstr "" @@ -4086,75 +4283,75 @@ msgstr "" msgid "Retrieve the instance's console log" msgstr "" -#: lxc/init.go:385 +#: lxc/init.go:359 #, c-format msgid "Retrieving image: %s" msgstr "" -#: lxc/config_trust.go:573 lxc/config_trust.go:574 +#: lxc/config_trust.go:579 lxc/config_trust.go:580 msgid "Revoke certificate add token" msgstr "" -#: lxc/cluster.go:885 +#: lxc/cluster.go:928 msgid "Revoke cluster member join token" msgstr "" -#: lxc/action.go:112 +#: lxc/action.go:122 msgid "Run against all instances" msgstr "" -#: lxc/warning.go:214 +#: lxc/warning.go:213 msgid "SEVERITY" msgstr "" -#: lxc/image.go:1058 +#: lxc/image.go:1057 msgid "SIZE" msgstr "" -#: lxc/list.go:570 +#: lxc/list.go:569 msgid "SNAPSHOTS" msgstr "" -#: lxc/storage.go:612 +#: lxc/storage.go:639 msgid "SOURCE" msgstr "" -#: lxc/info.go:137 lxc/info.go:246 +#: lxc/info.go:136 lxc/info.go:245 msgid "SR-IOV information:" msgstr "" -#: lxc/file.go:1212 +#: lxc/file.go:1211 #, c-format msgid "SSH client connected %q" msgstr "" -#: lxc/file.go:1213 +#: lxc/file.go:1212 #, c-format msgid "SSH client disconnected %q" msgstr "" -#: lxc/cluster.go:186 lxc/list.go:571 lxc/network.go:962 -#: lxc/network_peer.go:143 lxc/storage.go:617 +#: lxc/cluster.go:185 lxc/list.go:570 lxc/network.go:979 +#: lxc/network_peer.go:142 lxc/storage.go:644 msgid "STATE" msgstr "" -#: lxc/remote.go:731 +#: lxc/remote.go:730 msgid "STATIC" msgstr "" -#: lxc/operation.go:165 lxc/warning.go:215 +#: lxc/operation.go:164 lxc/warning.go:214 msgid "STATUS" msgstr "" -#: lxc/list.go:556 +#: lxc/list.go:555 msgid "STORAGE POOL" msgstr "" -#: 
lxc/project.go:475 +#: lxc/project.go:490 msgid "STORAGE VOLUMES" msgstr "" -#: lxc/network.go:849 +#: lxc/network.go:866 msgid "STP" msgstr "" @@ -4162,19 +4359,19 @@ msgstr "" msgid "Send a raw query to LXD" msgstr "" -#: lxc/remote.go:102 +#: lxc/remote.go:101 msgid "Server authentication type (tls or candid)" msgstr "" -#: lxc/remote.go:454 +#: lxc/remote.go:453 msgid "Server certificate NACKed by user" msgstr "" -#: lxc/remote.go:594 +#: lxc/remote.go:593 msgid "Server doesn't trust us after authentication" msgstr "" -#: lxc/remote.go:101 +#: lxc/remote.go:100 msgid "Server protocol (lxd or simplestreams)" msgstr "" @@ -4183,19 +4380,19 @@ msgstr "" msgid "Server version: %s\n" msgstr "" -#: lxc/cluster.go:298 +#: lxc/cluster.go:313 msgid "Set a cluster member's configuration keys" msgstr "" -#: lxc/file.go:977 +#: lxc/file.go:976 msgid "Set authentication user when using SSH SFTP listener" msgstr "" -#: lxc/config_device.go:541 +#: lxc/config_device.go:545 msgid "Set device configuration keys" msgstr "" -#: lxc/config_device.go:544 +#: lxc/config_device.go:548 msgid "" "Set device configuration keys\n" "\n" @@ -4204,7 +4401,7 @@ msgid "" " lxc config device set [:] " msgstr "" -#: lxc/config_device.go:551 +#: lxc/config_device.go:555 msgid "" "Set device configuration keys\n" "\n" @@ -4213,15 +4410,15 @@ msgid "" " lxc profile device set [:] " msgstr "" -#: lxc/image.go:1526 lxc/image.go:1527 +#: lxc/image.go:1525 lxc/image.go:1526 msgid "Set image properties" msgstr "" -#: lxc/config.go:463 +#: lxc/config.go:512 msgid "Set instance or server configuration keys" msgstr "" -#: lxc/config.go:464 +#: lxc/config.go:513 msgid "" "Set instance or server configuration keys\n" "\n" @@ -4230,11 +4427,11 @@ msgid "" " lxc config set [:][] " msgstr "" -#: lxc/network_acl.go:396 +#: lxc/network_acl.go:411 msgid "Set network ACL configuration keys" msgstr "" -#: lxc/network_acl.go:397 +#: lxc/network_acl.go:412 msgid "" "Set network ACL configuration keys\n" "\n" @@ 
-4243,11 +4440,11 @@ msgid "" " lxc network set [:] " msgstr "" -#: lxc/network.go:1100 +#: lxc/network.go:1119 msgid "Set network configuration keys" msgstr "" -#: lxc/network.go:1101 +#: lxc/network.go:1120 msgid "" "Set network configuration keys\n" "\n" @@ -4256,11 +4453,11 @@ msgid "" " lxc network set [:] " msgstr "" -#: lxc/network_forward.go:382 +#: lxc/network_forward.go:397 msgid "Set network forward keys" msgstr "" -#: lxc/network_forward.go:383 +#: lxc/network_forward.go:398 msgid "" "Set network forward keys\n" "\n" @@ -4269,11 +4466,11 @@ msgid "" " lxc network set [:] " msgstr "" -#: lxc/network_peer.go:384 +#: lxc/network_peer.go:398 msgid "Set network peer keys" msgstr "" -#: lxc/network_peer.go:385 +#: lxc/network_peer.go:399 msgid "" "Set network peer keys\n" "\n" @@ -4282,11 +4479,11 @@ msgid "" " lxc network set [:] " msgstr "" -#: lxc/network_zone.go:339 +#: lxc/network_zone.go:353 msgid "Set network zone configuration keys" msgstr "" -#: lxc/network_zone.go:340 +#: lxc/network_zone.go:354 msgid "" "Set network zone configuration keys\n" "\n" @@ -4295,15 +4492,15 @@ msgid "" " lxc network set [:] " msgstr "" -#: lxc/network_zone.go:898 lxc/network_zone.go:899 +#: lxc/network_zone.go:951 lxc/network_zone.go:952 msgid "Set network zone record configuration keys" msgstr "" -#: lxc/profile.go:775 +#: lxc/profile.go:801 msgid "Set profile configuration keys" msgstr "" -#: lxc/profile.go:776 +#: lxc/profile.go:802 msgid "" "Set profile configuration keys\n" "\n" @@ -4312,11 +4509,11 @@ msgid "" " lxc profile set [:] " msgstr "" -#: lxc/project.go:549 +#: lxc/project.go:566 msgid "Set project configuration keys" msgstr "" -#: lxc/project.go:550 +#: lxc/project.go:567 msgid "" "Set project configuration keys\n" "\n" @@ -4325,11 +4522,11 @@ msgid "" " lxc project set [:] " msgstr "" -#: lxc/storage.go:631 +#: lxc/storage.go:660 msgid "Set storage pool configuration keys" msgstr "" -#: lxc/storage.go:632 +#: lxc/storage.go:661 msgid "" "Set storage pool 
configuration keys\n" "\n" @@ -4338,36 +4535,84 @@ msgid "" " lxc storage set [:] " msgstr "" -#: lxc/storage_volume.go:1705 +#: lxc/storage_volume.go:1751 msgid "Set storage volume configuration keys" msgstr "" -#: lxc/storage_volume.go:1706 +#: lxc/storage_volume.go:1752 msgid "" "Set storage volume configuration keys\n" "\n" "For backward compatibility, a single configuration key may still be set " "with:\n" -" lxc storage volume set [:] " +" lxc storage volume set [:] [/] " msgstr "" -#: lxc/remote.go:912 lxc/remote.go:913 +#: lxc/remote.go:910 lxc/remote.go:911 msgid "Set the URL for the remote" msgstr "" -#: lxc/file.go:469 +#: lxc/file.go:468 msgid "Set the file's gid on push" msgstr "" -#: lxc/file.go:470 +#: lxc/file.go:469 msgid "Set the file's perms on push" msgstr "" -#: lxc/file.go:468 +#: lxc/file.go:467 msgid "Set the file's uid on push" msgstr "" -#: lxc/file.go:975 +#: lxc/cluster.go:316 +msgid "Set the key as a cluster property" +msgstr "" + +#: lxc/network_acl.go:418 +msgid "Set the key as a network ACL property" +msgstr "" + +#: lxc/network_forward.go:405 +msgid "Set the key as a network forward property" +msgstr "" + +#: lxc/network_peer.go:406 +msgid "Set the key as a network peer property" +msgstr "" + +#: lxc/network.go:1127 +msgid "Set the key as a network property" +msgstr "" + +#: lxc/network_zone.go:361 +msgid "Set the key as a network zone property" +msgstr "" + +#: lxc/network_zone.go:957 +msgid "Set the key as a network zone record property" +msgstr "" + +#: lxc/profile.go:809 +msgid "Set the key as a profile property" +msgstr "" + +#: lxc/project.go:574 +msgid "Set the key as a project property" +msgstr "" + +#: lxc/storage.go:668 +msgid "Set the key as a storage property" +msgstr "" + +#: lxc/storage_volume.go:1768 +msgid "Set the key as a storage volume property" +msgstr "" + +#: lxc/config.go:529 +msgid "Set the key as an instance property" +msgstr "" + +#: lxc/file.go:974 msgid "Setup SSH SFTP listener on address:port instead of 
mounting" msgstr "" @@ -4379,19 +4624,19 @@ msgstr "" msgid "Show all information messages" msgstr "" -#: lxc/cluster_group.go:556 lxc/cluster_group.go:557 +#: lxc/cluster_group.go:576 lxc/cluster_group.go:577 msgid "Show cluster group configurations" msgstr "" -#: lxc/config_template.go:300 lxc/config_template.go:301 +#: lxc/config_template.go:299 lxc/config_template.go:300 msgid "Show content of instance file templates" msgstr "" -#: lxc/cluster.go:202 lxc/cluster.go:203 +#: lxc/cluster.go:201 lxc/cluster.go:202 msgid "Show details of a cluster member" msgstr "" -#: lxc/operation.go:184 lxc/operation.go:185 +#: lxc/operation.go:183 lxc/operation.go:184 msgid "Show details on a background operation" msgstr "" @@ -4399,11 +4644,11 @@ msgstr "" msgid "Show events from all projects" msgstr "" -#: lxc/config_device.go:663 lxc/config_device.go:664 +#: lxc/config_device.go:667 lxc/config_device.go:668 msgid "Show full device configuration" msgstr "" -#: lxc/image.go:1415 lxc/image.go:1416 +#: lxc/image.go:1414 lxc/image.go:1415 msgid "Show image properties" msgstr "" @@ -4411,11 +4656,11 @@ msgstr "" msgid "Show instance metadata files" msgstr "" -#: lxc/config.go:621 lxc/config.go:622 +#: lxc/config.go:729 lxc/config.go:730 msgid "Show instance or server configurations" msgstr "" -#: lxc/info.go:33 lxc/info.go:34 +#: lxc/info.go:32 lxc/info.go:33 msgid "Show instance or server information" msgstr "" @@ -4427,162 +4672,162 @@ msgstr "" msgid "Show local and remote versions" msgstr "" -#: lxc/network_acl.go:165 lxc/network_acl.go:166 +#: lxc/network_acl.go:164 lxc/network_acl.go:165 msgid "Show network ACL configurations" msgstr "" -#: lxc/network_acl.go:218 lxc/network_acl.go:219 +#: lxc/network_acl.go:217 lxc/network_acl.go:218 msgid "Show network ACL log" msgstr "" -#: lxc/network.go:1170 lxc/network.go:1171 +#: lxc/network.go:1207 lxc/network.go:1208 msgid "Show network configurations" msgstr "" -#: lxc/network_forward.go:167 lxc/network_forward.go:168 +#: 
lxc/network_forward.go:166 lxc/network_forward.go:167 msgid "Show network forward configurations" msgstr "" -#: lxc/network_peer.go:158 lxc/network_peer.go:159 +#: lxc/network_peer.go:157 lxc/network_peer.go:158 msgid "Show network peer configurations" msgstr "" -#: lxc/network_zone.go:156 lxc/network_zone.go:157 +#: lxc/network_zone.go:155 lxc/network_zone.go:156 msgid "Show network zone configurations" msgstr "" -#: lxc/network_zone.go:720 +#: lxc/network_zone.go:758 msgid "Show network zone record configuration" msgstr "" -#: lxc/network_zone.go:721 +#: lxc/network_zone.go:759 msgid "Show network zone record configurations" msgstr "" -#: lxc/profile.go:834 lxc/profile.go:835 +#: lxc/profile.go:877 lxc/profile.go:878 msgid "Show profile configurations" msgstr "" -#: lxc/project.go:638 lxc/project.go:639 +#: lxc/project.go:676 lxc/project.go:677 msgid "Show project options" msgstr "" -#: lxc/storage.go:705 lxc/storage.go:706 +#: lxc/storage.go:752 lxc/storage.go:753 msgid "Show storage pool configurations and resources" msgstr "" -#: lxc/storage_volume.go:1793 lxc/storage_volume.go:1794 +#: lxc/storage_volume.go:1895 lxc/storage_volume.go:1896 msgid "Show storage volume configurations" msgstr "" -#: lxc/storage_volume.go:1133 lxc/storage_volume.go:1134 +#: lxc/storage_volume.go:1170 lxc/storage_volume.go:1171 msgid "Show storage volume state information" msgstr "" -#: lxc/remote.go:624 lxc/remote.go:625 +#: lxc/remote.go:623 lxc/remote.go:624 msgid "Show the default remote" msgstr "" -#: lxc/config.go:625 +#: lxc/config.go:733 msgid "Show the expanded configuration" msgstr "" -#: lxc/info.go:44 +#: lxc/info.go:43 msgid "Show the instance's last 100 log lines?" 
msgstr "" -#: lxc/info.go:45 +#: lxc/info.go:44 msgid "Show the resources available to the server" msgstr "" -#: lxc/storage.go:709 +#: lxc/storage.go:756 msgid "Show the resources available to the storage pool" msgstr "" -#: lxc/storage.go:402 +#: lxc/storage.go:413 msgid "Show the used and free space in bytes" msgstr "" -#: lxc/config_trust.go:644 lxc/config_trust.go:645 +#: lxc/config_trust.go:650 lxc/config_trust.go:651 msgid "Show trust configurations" msgstr "" -#: lxc/image.go:881 lxc/image.go:882 +#: lxc/image.go:880 lxc/image.go:881 msgid "Show useful information about images" msgstr "" -#: lxc/storage.go:398 lxc/storage.go:399 +#: lxc/storage.go:409 lxc/storage.go:410 msgid "Show useful information about storage pools" msgstr "" -#: lxc/warning.go:303 lxc/warning.go:304 +#: lxc/warning.go:302 lxc/warning.go:303 msgid "Show warning" msgstr "" -#: lxc/image.go:942 +#: lxc/image.go:941 #, c-format msgid "Size: %.2fMiB" msgstr "" -#: lxc/info.go:277 lxc/info.go:293 +#: lxc/info.go:276 lxc/info.go:292 #, c-format msgid "Size: %s" msgstr "" -#: lxc/storage_volume.go:1926 lxc/storage_volume.go:1927 +#: lxc/storage_volume.go:2050 lxc/storage_volume.go:2051 msgid "Snapshot storage volumes" msgstr "" -#: lxc/storage_volume.go:1756 +#: lxc/storage_volume.go:1841 msgid "Snapshots are read-only and can't have their configuration changed" msgstr "" -#: lxc/info.go:598 lxc/storage_volume.go:1248 +#: lxc/info.go:597 lxc/storage_volume.go:1292 msgid "Snapshots:" msgstr "" -#: lxc/info.go:360 +#: lxc/info.go:359 #, c-format msgid "Socket %d:" msgstr "" -#: lxc/action.go:386 +#: lxc/action.go:402 #, c-format msgid "Some instances failed to %s" msgstr "" -#: lxc/image.go:985 +#: lxc/image.go:984 msgid "Source:" msgstr "" -#: lxc/action.go:30 lxc/action.go:31 +#: lxc/action.go:31 lxc/action.go:32 msgid "Start instances" msgstr "" -#: lxc/launch.go:79 +#: lxc/launch.go:78 #, c-format msgid "Starting %s" msgstr "" -#: lxc/info.go:555 +#: lxc/info.go:554 msgid "State" msgstr "" 
-#: lxc/network.go:811 +#: lxc/network.go:828 #, c-format msgid "State: %s" msgstr "" -#: lxc/info.go:632 +#: lxc/info.go:631 msgid "Stateful" msgstr "" -#: lxc/info.go:465 +#: lxc/info.go:464 #, c-format msgid "Status: %s" msgstr "" -#: lxc/action.go:90 lxc/action.go:91 +#: lxc/action.go:98 lxc/action.go:99 msgid "Stop instances" msgstr "" @@ -4590,126 +4835,126 @@ msgstr "" msgid "Stop the instance if currently running" msgstr "" -#: lxc/publish.go:140 +#: lxc/publish.go:141 msgid "Stopping instance failed!" msgstr "" -#: lxc/delete.go:129 +#: lxc/delete.go:134 #, c-format msgid "Stopping the instance failed: %s" msgstr "" -#: lxc/storage.go:149 +#: lxc/storage.go:148 #, c-format msgid "Storage pool %s created" msgstr "" -#: lxc/storage.go:201 +#: lxc/storage.go:200 #, c-format msgid "Storage pool %s deleted" msgstr "" -#: lxc/storage.go:147 +#: lxc/storage.go:146 #, c-format msgid "Storage pool %s pending on member %s" msgstr "" -#: lxc/copy.go:60 lxc/import.go:36 lxc/init.go:54 lxc/move.go:65 +#: lxc/copy.go:59 lxc/import.go:35 lxc/init.go:53 lxc/move.go:64 msgid "Storage pool name" msgstr "" -#: lxc/storage_volume.go:586 +#: lxc/storage_volume.go:591 #, c-format msgid "Storage volume %s created" msgstr "" -#: lxc/storage_volume.go:662 +#: lxc/storage_volume.go:667 #, c-format msgid "Storage volume %s deleted" msgstr "" -#: lxc/storage_volume.go:429 +#: lxc/storage_volume.go:434 msgid "Storage volume copied successfully!" msgstr "" -#: lxc/storage_volume.go:433 +#: lxc/storage_volume.go:438 msgid "Storage volume moved successfully!" 
msgstr "" -#: lxc/action.go:115 +#: lxc/action.go:125 msgid "Store the instance state" msgstr "" -#: lxc/cluster.go:1044 +#: lxc/cluster.go:1087 #, c-format msgid "Successfully updated cluster certificates for remote %s" msgstr "" -#: lxc/info.go:206 +#: lxc/info.go:205 #, c-format msgid "Supported modes: %s" msgstr "" -#: lxc/info.go:210 +#: lxc/info.go:209 #, c-format msgid "Supported ports: %s" msgstr "" -#: lxc/info.go:537 +#: lxc/info.go:536 msgid "Swap (current)" msgstr "" -#: lxc/info.go:541 +#: lxc/info.go:540 msgid "Swap (peak)" msgstr "" -#: lxc/project.go:691 lxc/project.go:692 +#: lxc/project.go:729 lxc/project.go:730 msgid "Switch the current project" msgstr "" -#: lxc/remote.go:874 lxc/remote.go:875 +#: lxc/remote.go:872 lxc/remote.go:873 msgid "Switch the default remote" msgstr "" -#: lxc/alias.go:131 +#: lxc/alias.go:140 msgid "TARGET" msgstr "" -#: lxc/cluster.go:869 lxc/config_trust.go:509 +#: lxc/cluster.go:912 lxc/config_trust.go:515 msgid "TOKEN" msgstr "" -#: lxc/config_trust.go:402 lxc/image.go:1060 lxc/image_alias.go:237 -#: lxc/list.go:572 lxc/network.go:956 lxc/network.go:1030 lxc/operation.go:163 -#: lxc/storage_volume.go:1457 lxc/warning.go:216 +#: lxc/config_trust.go:408 lxc/image.go:1059 lxc/image_alias.go:236 +#: lxc/list.go:571 lxc/network.go:973 lxc/network.go:1047 lxc/operation.go:162 +#: lxc/storage_volume.go:1501 lxc/warning.go:215 msgid "TYPE" msgstr "" -#: lxc/info.go:630 lxc/info.go:681 lxc/storage_volume.go:1320 +#: lxc/info.go:629 lxc/info.go:680 lxc/storage_volume.go:1364 msgid "Taken at" msgstr "" -#: lxc/file.go:1014 +#: lxc/file.go:1013 msgid "Target path and --listen flag cannot be used together" msgstr "" -#: lxc/file.go:1008 +#: lxc/file.go:1007 msgid "Target path must be a directory" msgstr "" -#: lxc/move.go:168 +#: lxc/move.go:167 msgid "The --instance-only flag can't be used with --target" msgstr "" -#: lxc/move.go:205 +#: lxc/move.go:204 msgid "The --mode flag can't be used with --storage" msgstr "" -#: 
lxc/move.go:180 +#: lxc/move.go:179 msgid "The --mode flag can't be used with --target" msgstr "" -#: lxc/move.go:221 +#: lxc/move.go:220 msgid "The --mode flag can't be used with --target-project" msgstr "" @@ -4717,85 +4962,160 @@ msgstr "" msgid "The --show-log flag is only supported for by 'console' output type" msgstr "" -#: lxc/move.go:172 +#: lxc/move.go:171 msgid "The --storage flag can't be used with --target" msgstr "" -#: lxc/move.go:176 +#: lxc/move.go:175 msgid "The --target-project flag can't be used with --target" msgstr "" -#: lxc/move.go:192 +#: lxc/move.go:191 msgid "The destination LXD server is not clustered" msgstr "" -#: lxc/config_device.go:147 lxc/config_device.go:164 lxc/config_device.go:388 +#: lxc/config_device.go:151 lxc/config_device.go:168 lxc/config_device.go:392 msgid "The device already exists" msgstr "" -#: lxc/network_acl.go:839 lxc/network_acl.go:961 +#: lxc/network_acl.go:877 lxc/network_acl.go:999 msgid "The direction argument must be one of: ingress, egress" msgstr "" -#: lxc/delete.go:113 +#: lxc/delete.go:118 msgid "The instance is currently running, stop it first or pass --force" msgstr "" -#: lxc/publish.go:109 +#: lxc/publish.go:110 msgid "" "The instance is currently running. Use --force to have it stopped and " "restarted" msgstr "" -#: lxc/init.go:489 +#: lxc/init.go:433 msgid "The instance you are starting doesn't have any network attached to it." msgstr "" -#: lxc/cluster.go:282 +#: lxc/config.go:631 +msgid "The is no config key to set on an instance snapshot." +msgstr "" + +#: lxc/cluster.go:295 #, c-format msgid "The key %q does not exist on cluster member %q" msgstr "" -#: lxc/init.go:473 +#: lxc/utils.go:347 #, c-format -msgid "The local image '%s' couldn't be found, trying '%s:%s' instead." +msgid "The local image '%q' couldn't be found, trying '%q:%q' instead." msgstr "" -#: lxc/init.go:469 +#: lxc/utils.go:343 #, c-format -msgid "The local image '%s' couldn't be found, trying '%s:' instead." 
+msgid "The local image '%q' couldn't be found, trying '%q:' instead." msgstr "" -#: lxc/config_device.go:393 +#: lxc/config_device.go:397 msgid "The profile device doesn't exist" msgstr "" -#: lxc/info.go:345 +#: lxc/cluster.go:286 +#, c-format +msgid "The property %q does not exist on the cluster member %q: %v" +msgstr "" + +#: lxc/config.go:454 +#, c-format +msgid "The property %q does not exist on the instance %q: %v" +msgstr "" + +#: lxc/config.go:430 +#, c-format +msgid "The property %q does not exist on the instance snapshot %s/%s: %v" +msgstr "" + +#: lxc/network.go:756 +#, c-format +msgid "The property %q does not exist on the network %q: %v" +msgstr "" + +#: lxc/network_acl.go:302 +#, c-format +msgid "The property %q does not exist on the network ACL %q: %v" +msgstr "" + +#: lxc/network_forward.go:371 +#, c-format +msgid "The property %q does not exist on the network forward %q: %v" +msgstr "" + +#: lxc/network_peer.go:372 +#, c-format +msgid "The property %q does not exist on the network peer %q: %v" +msgstr "" + +#: lxc/network_zone.go:246 +#, c-format +msgid "The property %q does not exist on the network zone %q: %v" +msgstr "" + +#: lxc/network_zone.go:845 +#, c-format +msgid "The property %q does not exist on the network zone record %q: %v" +msgstr "" + +#: lxc/profile.go:590 +#, c-format +msgid "The property %q does not exist on the profile %q: %v" +msgstr "" + +#: lxc/project.go:385 +#, c-format +msgid "The property %q does not exist on the project %q: %v" +msgstr "" + +#: lxc/storage.go:384 +#, c-format +msgid "The property %q does not exist on the storage pool %q: %v" +msgstr "" + +#: lxc/storage_volume.go:1146 +#, c-format +msgid "The property %q does not exist on the storage pool volume %q: %v" +msgstr "" + +#: lxc/storage_volume.go:1123 +#, c-format +msgid "" +"The property %q does not exist on the storage pool volume snapshot %s/%s: %v" +msgstr "" + +#: lxc/info.go:344 msgid "The server doesn't implement the newer v2 resources API" msgstr "" 
-#: lxc/move.go:290 +#: lxc/move.go:289 msgid "The source LXD server is not clustered" msgstr "" -#: lxc/network.go:468 lxc/network.go:553 lxc/storage_volume.go:737 -#: lxc/storage_volume.go:818 +#: lxc/network.go:472 lxc/network.go:557 lxc/storage_volume.go:742 +#: lxc/storage_volume.go:823 msgid "The specified device doesn't exist" msgstr "" -#: lxc/network.go:472 lxc/network.go:557 +#: lxc/network.go:476 lxc/network.go:561 msgid "The specified device doesn't match the network" msgstr "" -#: lxc/publish.go:82 +#: lxc/publish.go:83 msgid "There is no \"image name\". Did you want an alias?" msgstr "" -#: lxc/cluster.go:567 +#: lxc/cluster.go:605 msgid "This LXD server is already clustered" msgstr "" -#: lxc/cluster.go:557 +#: lxc/cluster.go:595 msgid "This LXD server is not available on the network" msgstr "" @@ -4811,23 +5131,23 @@ msgid "" "https://multipass.run" msgstr "" -#: lxc/info.go:318 +#: lxc/info.go:317 msgid "Threads:" msgstr "" -#: lxc/action.go:127 +#: lxc/action.go:137 msgid "Time to wait for the instance to shutdown cleanly" msgstr "" -#: lxc/image.go:946 +#: lxc/image.go:945 msgid "Timestamps:" msgstr "" -#: lxc/init.go:491 +#: lxc/init.go:435 msgid "To attach a network to an instance, use: lxc network attach" msgstr "" -#: lxc/init.go:490 +#: lxc/init.go:434 msgid "To create a new network, use: lxc network create" msgstr "" @@ -4841,70 +5161,70 @@ msgid "" "Or for a virtual machine: lxc launch ubuntu:22.04 --vm" msgstr "" -#: lxc/config.go:288 lxc/config.go:428 lxc/config.go:578 lxc/config.go:664 -#: lxc/copy.go:129 lxc/info.go:337 lxc/network.go:796 lxc/storage.go:431 +#: lxc/config.go:293 lxc/config.go:474 lxc/config.go:681 lxc/config.go:773 +#: lxc/copy.go:128 lxc/info.go:336 lxc/network.go:813 lxc/storage.go:442 msgid "To use --target, the destination remote must be a cluster" msgstr "" -#: lxc/storage_volume.go:1237 +#: lxc/storage_volume.go:1281 #, c-format msgid "Total: %s" msgstr "" -#: lxc/info.go:371 lxc/info.go:382 lxc/info.go:387 
lxc/info.go:393 +#: lxc/info.go:370 lxc/info.go:381 lxc/info.go:386 lxc/info.go:392 #, c-format msgid "Total: %v" msgstr "" -#: lxc/info.go:218 +#: lxc/info.go:217 #, c-format msgid "Transceiver type: %s" msgstr "" -#: lxc/storage_volume.go:1563 +#: lxc/storage_volume.go:1607 msgid "Transfer mode, one of pull (default), push or relay" msgstr "" -#: lxc/image.go:156 +#: lxc/image.go:155 msgid "Transfer mode. One of pull (default), push or relay" msgstr "" -#: lxc/storage_volume.go:333 +#: lxc/storage_volume.go:334 msgid "Transfer mode. One of pull (default), push or relay." msgstr "" -#: lxc/copy.go:57 +#: lxc/copy.go:56 msgid "Transfer mode. One of pull, push or relay" msgstr "" -#: lxc/move.go:63 +#: lxc/move.go:62 msgid "Transfer mode. One of pull, push or relay." msgstr "" -#: lxc/image.go:765 +#: lxc/image.go:764 #, c-format msgid "Transferring image: %s" msgstr "" -#: lxc/copy.go:341 lxc/move.go:308 +#: lxc/copy.go:336 lxc/move.go:307 #, c-format msgid "Transferring instance: %s" msgstr "" -#: lxc/network.go:836 +#: lxc/network.go:853 msgid "Transmit policy" msgstr "" -#: lxc/action.go:274 lxc/launch.go:111 +#: lxc/action.go:288 lxc/launch.go:110 #, c-format msgid "Try `lxc info --show-log %s` for more info" msgstr "" -#: lxc/info.go:554 +#: lxc/info.go:553 msgid "Type" msgstr "" -#: lxc/config_trust.go:104 +#: lxc/config_trust.go:103 msgid "Type of certificate" msgstr "" @@ -4914,69 +5234,69 @@ msgid "" "SPICE graphical output" msgstr "" -#: lxc/image.go:944 lxc/info.go:274 lxc/info.go:474 lxc/network.go:812 -#: lxc/storage_volume.go:1222 +#: lxc/image.go:943 lxc/info.go:273 lxc/info.go:473 lxc/network.go:829 +#: lxc/storage_volume.go:1266 #, c-format msgid "Type: %s" msgstr "" -#: lxc/info.go:472 +#: lxc/info.go:471 #, c-format msgid "Type: %s (ephemeral)" msgstr "" -#: lxc/project.go:789 +#: lxc/project.go:827 msgid "UNLIMITED" msgstr "" -#: lxc/image.go:1059 +#: lxc/image.go:1058 msgid "UPLOAD DATE" msgstr "" -#: lxc/cluster.go:181 lxc/remote.go:727 +#: 
lxc/cluster.go:180 lxc/remote.go:726 msgid "URL" msgstr "" -#: lxc/project.go:813 lxc/storage_volume.go:1462 +#: lxc/project.go:851 lxc/storage_volume.go:1506 msgid "USAGE" msgstr "" -#: lxc/network.go:961 lxc/network_acl.go:150 lxc/network_zone.go:141 -#: lxc/profile.go:635 lxc/project.go:478 lxc/storage.go:616 -#: lxc/storage_volume.go:1461 +#: lxc/network.go:978 lxc/network_acl.go:149 lxc/network_zone.go:140 +#: lxc/profile.go:659 lxc/project.go:493 lxc/storage.go:643 +#: lxc/storage_volume.go:1505 msgid "USED BY" msgstr "" -#: lxc/warning.go:217 +#: lxc/warning.go:216 msgid "UUID" msgstr "" -#: lxc/info.go:133 +#: lxc/info.go:132 #, c-format msgid "UUID: %v" msgstr "" -#: lxc/file.go:193 +#: lxc/file.go:192 #, c-format msgid "Unable to create a temporary file: %v" msgstr "" -#: lxc/remote.go:212 lxc/remote.go:246 +#: lxc/remote.go:211 lxc/remote.go:245 msgid "Unavailable remote server" msgstr "" -#: lxc/config_trust.go:120 +#: lxc/config_trust.go:119 #, c-format msgid "Unknown certificate type %q" msgstr "" -#: lxc/file.go:1235 +#: lxc/file.go:1234 #, c-format msgid "Unknown channel type for client %q: %s" msgstr "" -#: lxc/image.go:1076 lxc/list.go:630 lxc/storage_volume.go:1495 -#: lxc/warning.go:244 +#: lxc/image.go:1075 lxc/list.go:629 lxc/storage_volume.go:1539 +#: lxc/warning.go:243 #, c-format msgid "Unknown column shorthand char '%c' in '%s'" msgstr "" @@ -4986,12 +5306,12 @@ msgstr "" msgid "Unknown console type %q" msgstr "" -#: lxc/file.go:789 +#: lxc/file.go:788 #, c-format msgid "Unknown file type '%s'" msgstr "" -#: lxc/network_acl.go:776 lxc/network_acl.go:895 +#: lxc/network_acl.go:814 lxc/network_acl.go:933 #, c-format msgid "Unknown key: %s" msgstr "" @@ -5001,108 +5321,160 @@ msgstr "" msgid "Unknown output type %q" msgstr "" -#: lxc/cluster.go:350 +#: lxc/cluster.go:385 msgid "Unset a cluster member's configuration keys" msgstr "" -#: lxc/move.go:61 +#: lxc/move.go:60 msgid "Unset all profiles on the target instance" msgstr "" -#: 
lxc/config_device.go:736 lxc/config_device.go:737 +#: lxc/config_device.go:740 lxc/config_device.go:741 msgid "Unset device configuration keys" msgstr "" -#: lxc/image.go:1582 lxc/image.go:1583 +#: lxc/image.go:1581 lxc/image.go:1582 msgid "Unset image properties" msgstr "" -#: lxc/config.go:741 lxc/config.go:742 +#: lxc/config.go:853 lxc/config.go:854 msgid "Unset instance or server configuration keys" msgstr "" -#: lxc/network_acl.go:456 lxc/network_acl.go:457 +#: lxc/network_acl.go:491 lxc/network_acl.go:492 msgid "Unset network ACL configuration keys" msgstr "" -#: lxc/network.go:1232 lxc/network.go:1233 +#: lxc/network.go:1271 lxc/network.go:1272 msgid "Unset network configuration keys" msgstr "" -#: lxc/network_forward.go:460 +#: lxc/network_forward.go:495 msgid "Unset network forward configuration keys" msgstr "" -#: lxc/network_forward.go:461 +#: lxc/network_forward.go:496 msgid "Unset network forward keys" msgstr "" -#: lxc/network_peer.go:453 +#: lxc/network_peer.go:487 msgid "Unset network peer configuration keys" msgstr "" -#: lxc/network_peer.go:454 +#: lxc/network_peer.go:488 msgid "Unset network peer keys" msgstr "" -#: lxc/network_zone.go:399 lxc/network_zone.go:400 +#: lxc/network_zone.go:433 lxc/network_zone.go:434 msgid "Unset network zone configuration keys" msgstr "" -#: lxc/network_zone.go:954 lxc/network_zone.go:955 +#: lxc/network_zone.go:1027 lxc/network_zone.go:1028 msgid "Unset network zone record configuration keys" msgstr "" -#: lxc/profile.go:888 lxc/profile.go:889 +#: lxc/profile.go:933 lxc/profile.go:934 msgid "Unset profile configuration keys" msgstr "" -#: lxc/project.go:609 lxc/project.go:610 +#: lxc/project.go:645 lxc/project.go:646 msgid "Unset project configuration keys" msgstr "" -#: lxc/storage.go:789 lxc/storage.go:790 +#: lxc/storage.go:838 lxc/storage.go:839 msgid "Unset storage pool configuration keys" msgstr "" -#: lxc/storage_volume.go:1892 lxc/storage_volume.go:1893 +#: lxc/storage_volume.go:2004 
lxc/storage_volume.go:2005 msgid "Unset storage volume configuration keys" msgstr "" -#: lxc/info.go:703 +#: lxc/cluster.go:388 +msgid "Unset the key as a cluster property" +msgstr "" + +#: lxc/network_acl.go:495 +msgid "Unset the key as a network ACL property" +msgstr "" + +#: lxc/network_forward.go:499 +msgid "Unset the key as a network forward property" +msgstr "" + +#: lxc/network_peer.go:491 +msgid "Unset the key as a network peer property" +msgstr "" + +#: lxc/network.go:1276 +msgid "Unset the key as a network property" +msgstr "" + +#: lxc/network_zone.go:437 +msgid "Unset the key as a network zone property" +msgstr "" + +#: lxc/network_zone.go:1031 +msgid "Unset the key as a network zone record property" +msgstr "" + +#: lxc/profile.go:938 +msgid "Unset the key as a profile property" +msgstr "" + +#: lxc/project.go:650 +msgid "Unset the key as a project property" +msgstr "" + +#: lxc/storage.go:843 +msgid "Unset the key as a storage property" +msgstr "" + +#: lxc/storage_volume.go:2018 +msgid "Unset the key as a storage volume property" +msgstr "" + +#: lxc/config.go:858 +msgid "Unset the key as an instance property" +msgstr "" + +#: lxc/info.go:702 #, c-format msgid "Unsupported instance type: %s" msgstr "" -#: lxc/network.go:837 +#: lxc/network.go:854 msgid "Up delay" msgstr "" -#: lxc/cluster.go:964 +#: lxc/cluster.go:1007 msgid "Update cluster certificate" msgstr "" -#: lxc/cluster.go:966 +#: lxc/cluster.go:1009 msgid "" "Update cluster certificate with PEM certificate and key read from input " "files." 
msgstr "" -#: lxc/image.go:953 +#: lxc/profile.go:253 +msgid "Update the target profile from the source if it already exists" +msgstr "" + +#: lxc/image.go:952 #, c-format msgid "Uploaded: %s" msgstr "" -#: lxc/network.go:853 +#: lxc/network.go:870 msgid "Upper devices" msgstr "" -#: lxc/storage_volume.go:1235 +#: lxc/storage_volume.go:1279 #, c-format msgid "Usage: %s" msgstr "" -#: lxc/export.go:42 lxc/storage_volume.go:2096 +#: lxc/export.go:42 lxc/storage_volume.go:2220 msgid "" "Use storage driver optimized format (can only be restored on a similar pool)" msgstr "" @@ -5111,7 +5483,7 @@ msgstr "" msgid "Use with help or --help to view sub-commands" msgstr "" -#: lxc/info.go:370 lxc/info.go:381 lxc/info.go:386 lxc/info.go:392 +#: lxc/info.go:369 lxc/info.go:380 lxc/info.go:385 lxc/info.go:391 #, c-format msgid "Used: %v" msgstr "" @@ -5120,52 +5492,52 @@ msgstr "" msgid "User ID to run the command as (default 0)" msgstr "" -#: lxc/cluster.go:461 lxc/delete.go:48 +#: lxc/cluster.go:499 lxc/delete.go:48 msgid "User aborted delete operation" msgstr "" -#: lxc/file.go:67 lxc/utils/cancel.go:65 +#: lxc/file.go:66 msgid "" "User signaled us three times, exiting. 
The remote operation will keep running" msgstr "" -#: lxc/info.go:141 lxc/info.go:250 +#: lxc/info.go:140 lxc/info.go:249 #, c-format msgid "VFs: %d" msgstr "" -#: lxc/network.go:861 +#: lxc/network.go:878 msgid "VLAN ID" msgstr "" -#: lxc/network.go:852 +#: lxc/network.go:869 msgid "VLAN filtering" msgstr "" -#: lxc/network.go:859 +#: lxc/network.go:876 msgid "VLAN:" msgstr "" -#: lxc/info.go:300 +#: lxc/info.go:299 #, c-format msgid "Vendor: %v" msgstr "" -#: lxc/info.go:94 lxc/info.go:180 +#: lxc/info.go:93 lxc/info.go:179 #, c-format msgid "Vendor: %v (%v)" msgstr "" -#: lxc/info.go:239 +#: lxc/info.go:238 #, c-format msgid "Verb: %s (%s)" msgstr "" -#: lxc/storage_volume.go:1322 +#: lxc/storage_volume.go:1366 msgid "Volume Only" msgstr "" -#: lxc/info.go:280 +#: lxc/info.go:279 #, c-format msgid "WWN: %s" msgstr "" @@ -5188,9 +5560,9 @@ msgstr "" msgid "Whether or not to snapshot the instance's running state" msgstr "" -#: lxc/network.go:934 lxc/operation.go:148 lxc/project.go:442 -#: lxc/project.go:447 lxc/project.go:452 lxc/project.go:457 lxc/remote.go:684 -#: lxc/remote.go:689 lxc/remote.go:694 +#: lxc/network.go:951 lxc/operation.go:147 lxc/project.go:457 +#: lxc/project.go:462 lxc/project.go:467 lxc/project.go:472 lxc/remote.go:683 +#: lxc/remote.go:688 lxc/remote.go:693 msgid "YES" msgstr "" @@ -5202,76 +5574,76 @@ msgstr "" msgid "You can't pass -t or -T at the same time as --mode" msgstr "" -#: lxc/copy.go:90 +#: lxc/copy.go:89 msgid "You must specify a destination instance name when using --target" msgstr "" -#: lxc/copy.go:85 lxc/move.go:274 lxc/move.go:350 lxc/move.go:402 +#: lxc/copy.go:84 lxc/move.go:273 lxc/move.go:349 lxc/move.go:401 msgid "You must specify a source instance name" msgstr "" -#: lxc/storage_volume.go:759 +#: lxc/storage_volume.go:764 msgid "[] []" msgstr "" -#: lxc/storage_volume.go:237 +#: lxc/storage_volume.go:238 msgid "[] [] []" msgstr "" -#: lxc/cluster.go:116 lxc/cluster.go:782 lxc/cluster_group.go:365 -#: 
lxc/config_trust.go:341 lxc/config_trust.go:424 lxc/monitor.go:31 -#: lxc/network.go:884 lxc/network_acl.go:92 lxc/network_zone.go:83 -#: lxc/operation.go:102 lxc/profile.go:586 lxc/project.go:391 -#: lxc/storage.go:552 lxc/version.go:20 lxc/warning.go:69 +#: lxc/cluster.go:115 lxc/cluster.go:825 lxc/cluster_group.go:379 +#: lxc/config_trust.go:347 lxc/config_trust.go:430 lxc/monitor.go:31 +#: lxc/network.go:901 lxc/network_acl.go:91 lxc/network_zone.go:82 +#: lxc/operation.go:101 lxc/profile.go:610 lxc/project.go:406 +#: lxc/storage.go:579 lxc/version.go:20 lxc/warning.go:68 msgid "[:]" msgstr "" -#: lxc/import.go:27 +#: lxc/import.go:26 msgid "[:] []" msgstr "" -#: lxc/cluster.go:962 +#: lxc/cluster.go:1005 msgid "[:] " msgstr "" -#: lxc/cluster.go:511 lxc/config_trust.go:572 +#: lxc/cluster.go:549 lxc/config_trust.go:578 msgid "[:] " msgstr "" -#: lxc/config_trust.go:86 +#: lxc/config_trust.go:85 msgid "[:] []" msgstr "" -#: lxc/image.go:1014 lxc/list.go:47 +#: lxc/image.go:1013 lxc/list.go:46 msgid "[:] [...]" msgstr "" -#: lxc/image_alias.go:150 +#: lxc/image_alias.go:149 msgid "[:] [...]" msgstr "" -#: lxc/network_acl.go:164 lxc/network_acl.go:217 lxc/network_acl.go:482 -#: lxc/network_acl.go:661 +#: lxc/network_acl.go:163 lxc/network_acl.go:216 lxc/network_acl.go:520 +#: lxc/network_acl.go:699 msgid "[:]" msgstr "" -#: lxc/network_acl.go:726 lxc/network_acl.go:847 +#: lxc/network_acl.go:764 lxc/network_acl.go:885 msgid "[:] =..." msgstr "" -#: lxc/network_acl.go:263 lxc/network_acl.go:455 +#: lxc/network_acl.go:264 lxc/network_acl.go:490 msgid "[:] " msgstr "" -#: lxc/network_acl.go:395 +#: lxc/network_acl.go:410 msgid "[:] =..." 
msgstr "" -#: lxc/network_acl.go:612 +#: lxc/network_acl.go:650 msgid "[:] " msgstr "" -#: lxc/network_acl.go:312 +#: lxc/network_acl.go:325 msgid "[:] [key=value...]" msgstr "" @@ -5279,89 +5651,89 @@ msgstr "" msgid "[:]" msgstr "" -#: lxc/network_zone.go:155 lxc/network_zone.go:425 lxc/network_zone.go:543 +#: lxc/network_zone.go:154 lxc/network_zone.go:463 lxc/network_zone.go:581 msgid "[:]" msgstr "" -#: lxc/network_zone.go:208 lxc/network_zone.go:398 +#: lxc/network_zone.go:209 lxc/network_zone.go:432 msgid "[:] " msgstr "" -#: lxc/network_zone.go:338 +#: lxc/network_zone.go:352 msgid "[:] =..." msgstr "" -#: lxc/network_zone.go:257 +#: lxc/network_zone.go:269 msgid "[:] [key=value...]" msgstr "" -#: lxc/image_alias.go:105 +#: lxc/image_alias.go:104 msgid "[:]" msgstr "" -#: lxc/image_alias.go:59 +#: lxc/image_alias.go:58 msgid "[:] " msgstr "" -#: lxc/image_alias.go:253 +#: lxc/image_alias.go:252 msgid "[:] " msgstr "" -#: lxc/cluster.go:596 +#: lxc/cluster.go:634 msgid "[:]" msgstr "" -#: lxc/config_trust.go:228 lxc/config_trust.go:525 lxc/config_trust.go:643 +#: lxc/config_trust.go:234 lxc/config_trust.go:531 lxc/config_trust.go:649 msgid "[:]" msgstr "" -#: lxc/cluster_group.go:148 lxc/cluster_group.go:202 lxc/cluster_group.go:253 -#: lxc/cluster_group.go:555 +#: lxc/cluster_group.go:155 lxc/cluster_group.go:211 lxc/cluster_group.go:264 +#: lxc/cluster_group.go:575 msgid "[:]" msgstr "" -#: lxc/cluster_group.go:508 +#: lxc/cluster_group.go:526 msgid "[:] " msgstr "" -#: lxc/image.go:359 lxc/image.go:880 lxc/image.go:1414 +#: lxc/image.go:358 lxc/image.go:879 lxc/image.go:1413 msgid "[:]" msgstr "" -#: lxc/image.go:1473 lxc/image.go:1581 +#: lxc/image.go:1472 lxc/image.go:1580 msgid "[:] " msgstr "" -#: lxc/image.go:1525 +#: lxc/image.go:1524 msgid "[:] " msgstr "" -#: lxc/image.go:142 +#: lxc/image.go:141 msgid "[:] :" msgstr "" -#: lxc/init.go:39 lxc/launch.go:23 +#: lxc/init.go:38 lxc/launch.go:22 msgid "[:] [:][]" msgstr "" -#: lxc/image.go:486 +#: 
lxc/image.go:485 msgid "[:] []" msgstr "" -#: lxc/image.go:307 lxc/image.go:1334 +#: lxc/image.go:306 lxc/image.go:1333 msgid "[:] [[:]...]" msgstr "" -#: lxc/config_device.go:284 lxc/config_device.go:658 lxc/config_metadata.go:53 -#: lxc/config_metadata.go:178 lxc/config_template.go:239 lxc/console.go:34 +#: lxc/config_device.go:288 lxc/config_device.go:662 lxc/config_metadata.go:53 +#: lxc/config_metadata.go:178 lxc/config_template.go:238 lxc/console.go:34 msgid "[:]" msgstr "" -#: lxc/config_device.go:198 lxc/config_device.go:731 +#: lxc/config_device.go:202 lxc/config_device.go:735 msgid "[:] " msgstr "" -#: lxc/config_device.go:543 +#: lxc/config_device.go:547 msgid "[:] =..." msgstr "" @@ -5369,15 +5741,15 @@ msgstr "" msgid "[:] [key=value...]" msgstr "" -#: lxc/config_device.go:350 +#: lxc/config_device.go:354 msgid "[:] [key=value...]" msgstr "" -#: lxc/config_device.go:439 +#: lxc/config_device.go:443 msgid "[:] ..." msgstr "" -#: lxc/profile.go:102 lxc/profile.go:648 +#: lxc/profile.go:102 lxc/profile.go:672 msgid "[:] " msgstr "" @@ -5389,8 +5761,8 @@ msgstr "" msgid "[:] " msgstr "" -#: lxc/config_template.go:66 lxc/config_template.go:108 -#: lxc/config_template.go:151 lxc/config_template.go:299 +#: lxc/config_template.go:65 lxc/config_template.go:107 +#: lxc/config_template.go:150 lxc/config_template.go:298 msgid "[:]