diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
deleted file mode 100644
index 30e9030d905c..000000000000
--- a/.github/CODEOWNERS
+++ /dev/null
@@ -1 +0,0 @@
-* @tomponline
diff --git a/.shellcheckrc b/.shellcheckrc
new file mode 100644
index 000000000000..97d91bae52a7
--- /dev/null
+++ b/.shellcheckrc
@@ -0,0 +1,2 @@
+#
+shell=bash
diff --git a/Makefile b/Makefile
index 948110959f2f..ce466273c978 100644
--- a/Makefile
+++ b/Makefile
@@ -263,8 +263,7 @@ ifeq ($(shell command -v flake8),)
 		exit 1
 endif
 	flake8 test/deps/import-busybox
-	shellcheck --shell bash test/*.sh test/includes/*.sh test/suites/*.sh test/backends/*.sh test/lint/*.sh
-	shellcheck test/extras/*.sh
+	shellcheck test/*.sh test/includes/*.sh test/suites/*.sh test/backends/*.sh test/lint/*.sh test/extras/*.sh
 	NOT_EXEC="$(shell find test/lint -type f -not -executable)"; \
 	if [ -n "$$NOT_EXEC" ]; then \
 		echo "lint scripts not executable: $$NOT_EXEC"; \
diff --git a/lxd-agent/server.go b/lxd-agent/server.go
index 0f54a7252a0f..e3a15e47c796 100644
--- a/lxd-agent/server.go
+++ b/lxd-agent/server.go
@@ -96,7 +96,7 @@ func createCmd(restAPI *mux.Router, version string, c APIEndpoint, cert *x509.Ce
 		if err != nil {
 			writeErr := response.InternalError(err).Render(w)
 			if writeErr != nil {
-				logger.Error("Failed writing error for HTTP response", logger.Ctx{"url": uri, "error": err, "writeErr": writeErr})
+				logger.Error("Failed writing error for HTTP response", logger.Ctx{"url": uri, "err": err, "writeErr": writeErr})
 			}
 		}
 	})
diff --git a/lxd-migrate/main_migrate.go b/lxd-migrate/main_migrate.go
index 7f6763b595e4..7bed2e3b203a 100644
--- a/lxd-migrate/main_migrate.go
+++ b/lxd-migrate/main_migrate.go
@@ -582,7 +582,7 @@ func (c *cmdMigrate) run(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
-	err = transferRootfs(ctx, server, op, fullPath, c.flagRsyncArgs, config.InstanceArgs.Type)
+	err = transferRootDiskForMigration(ctx, op, fullPath, c.flagRsyncArgs, config.InstanceArgs.Type)
 	if err != nil {
 		return err
 	}
diff --git a/lxd-migrate/transfer.go b/lxd-migrate/transfer.go
index 4a0dbc245e05..ff05a7672fb3 100644
--- a/lxd-migrate/transfer.go
+++ b/lxd-migrate/transfer.go
@@ -131,6 +131,28 @@ func rsyncSendSetup(ctx context.Context, path string, rsyncArgs string, instance
 	return cmd, conn, stderr, nil
 }
 
+func sendBlockVol(ctx context.Context, conn io.WriteCloser, path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+
+	defer func() { _ = f.Close() }()
+
+	go func() {
+		<-ctx.Done()
+		_ = conn.Close()
+		_ = f.Close()
+	}()
+
+	_, err = io.Copy(conn, f)
+	if err != nil {
+		return err
+	}
+
+	return conn.Close()
+}
+
 func protoSendError(ws *websocket.Conn, err error) {
 	migration.ProtoSendControl(ws, err)
 
diff --git a/lxd-migrate/utils.go b/lxd-migrate/utils.go
index 52dcfd6600c2..00f784c08e63 100644
--- a/lxd-migrate/utils.go
+++ b/lxd-migrate/utils.go
@@ -6,7 +6,6 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"io"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -28,7 +27,7 @@ import (
 	"github.com/canonical/lxd/shared/ws"
 )
 
-func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operation, rootfs string, rsyncArgs string, instanceType api.InstanceType) error {
+func transferRootDiskForMigration(ctx context.Context, op lxd.Operation, rootfs string, rsyncArgs string, instanceType api.InstanceType) error {
 	opAPI := op.Get()
 
 	// Connect to the websockets
@@ -105,27 +104,7 @@ func transferRootfs(ctx context.Context, dst lxd.InstanceServer, op lxd.Operatio
 
 	// Send block volume
 	if instanceType == api.InstanceTypeVM {
-		f, err := os.Open(filepath.Join(rootfs, "root.img"))
-		if err != nil {
-			return abort(err)
-		}
-
-		defer func() { _ = f.Close() }()
-
-		conn := ws.NewWrapper(wsFs)
-
-		go func() {
-			<-ctx.Done()
-			_ = conn.Close()
-			_ = f.Close()
-		}()
-
-		_, err = io.Copy(conn, f)
-		if err != nil {
-			return abort(fmt.Errorf("Failed sending block volume: %w", err))
-		}
-
-		err = conn.Close()
+		err := sendBlockVol(ctx, ws.NewWrapper(wsFs), filepath.Join(rootfs, "root.img"))
 		if err != nil {
 			return abort(err)
 		}
diff --git a/lxd/api_project.go b/lxd/api_project.go
index ccd28921edc1..993412e44401 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -632,14 +632,14 @@ func projectPatch(d *Daemon, r *http.Request) response.Response {
 		req.Description = project.Description
 	}
 
-	config, err := reqRaw.GetMap("config")
-	if err != nil {
-		req.Config = project.Config
-	} else {
-		for k, v := range project.Config {
-			_, ok := config[k]
-			if !ok {
-				config[k] = v
+	// Perform config patch
+	req.Config = util.CopyConfig(project.Config)
+	patches, err := reqRaw.GetMap("config")
+	if err == nil {
+		for k, v := range patches {
+			strVal, ok := v.(string)
+			if ok {
+				req.Config[k] = strVal
 			}
 		}
 	}
diff --git a/lxd/apparmor/archive.go b/lxd/apparmor/archive.go
index e4a691722382..ee9789293dd0 100644
--- a/lxd/apparmor/archive.go
+++ b/lxd/apparmor/archive.go
@@ -122,7 +122,7 @@ func archiveProfile(outputPath string, allowedCommandPaths []string) (string, er
 	}
 
 	// Render the profile.
-	var sb *strings.Builder = &strings.Builder{}
+	sb := &strings.Builder{}
 	err = archiveProfileTpl.Execute(sb, map[string]any{
 		"name":       ArchiveProfileName(outputPath), // Use non-deferenced outputPath for name.
 		"outputPath": outputPathFull,                 // Use deferenced path in AppArmor profile.
diff --git a/lxd/apparmor/instance.go b/lxd/apparmor/instance.go
index 11155c352adc..7059fbef18e9 100644
--- a/lxd/apparmor/instance.go
+++ b/lxd/apparmor/instance.go
@@ -154,7 +154,7 @@ func instanceProfile(sysOS *sys.OS, inst instance) (string, error) {
 	}
 
 	// Render the profile.
-	var sb *strings.Builder = &strings.Builder{}
+	sb := &strings.Builder{}
 	if inst.Type() == instancetype.Container {
 		err = lxcProfileTpl.Execute(sb, map[string]any{
 			"feature_cgns": sysOS.CGInfo.Namespacing,
diff --git a/lxd/apparmor/rsync.go b/lxd/apparmor/rsync.go
index a12a153a950f..346551dfee31 100644
--- a/lxd/apparmor/rsync.go
+++ b/lxd/apparmor/rsync.go
@@ -175,7 +175,7 @@ func rsyncProfile(sysOS *sys.OS, name string, sourcePath string, dstPath string)
 		execPath = fullPath
 	}
 
-	var sb *strings.Builder = &strings.Builder{}
+	sb := &strings.Builder{}
 	err = rsyncProfileTpl.Execute(sb, map[string]any{
 		"name":     name,
 		"execPath": execPath,
diff --git a/lxd/certificates.go b/lxd/certificates.go
index ce8bc543674d..a47194fd2036 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -341,9 +341,20 @@ func clusterMemberJoinTokenValid(s *state.State, r *http.Request, projectName st
 
 		// Depending on whether it's a local operation or not, expiry will either be a time.Time or a string.
 		if s.ServerName == foundOp.Location {
-			expiry, _ = expiresAt.(time.Time)
+			expiry, ok = expiresAt.(time.Time)
+			if !ok {
+				return nil, fmt.Errorf("Unexpected expiry type in cluster join token operation: %T (%v)", expiresAt, expiresAt)
+			}
 		} else {
-			expiry, _ = time.Parse(time.RFC3339Nano, expiresAt.(string))
+			expiryStr, ok := expiresAt.(string)
+			if !ok {
+				return nil, fmt.Errorf("Unexpected expiry type in cluster join token operation: %T (%v)", expiresAt, expiresAt)
+			}
+
+			expiry, err = time.Parse(time.RFC3339Nano, expiryStr)
+			if err != nil {
+				return nil, fmt.Errorf("Invalid expiry format in cluster join token operation: %w (%q)", err, expiryStr)
+			}
 		}
 
 		// Check if token has expired.
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 5ce8450a4883..5867ea026509 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -405,7 +405,7 @@ func Join(state *state.State, gateway *Gateway, networkCert *shared.CertInfo, se
 			return nil
 		})
 		if err != nil {
-			logger.Error("Failed to unlock global database after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to unlock global database after cluster join error", logger.Ctx{"err": err})
 		}
 	})
 
@@ -437,32 +437,32 @@ func Join(state *state.State, gateway *Gateway, networkCert *shared.CertInfo, se
 			return tx.ReplaceRaftNodes([]db.RaftNode{})
 		})
 		if err != nil {
-			logger.Error("Failed to clear local raft node records after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to clear local raft node records after cluster join error", logger.Ctx{"err": err})
 			return
 		}
 
 		err = gateway.Shutdown()
 		if err != nil {
-			logger.Error("Failed to shutdown gateway after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to shutdown gateway after cluster join error", logger.Ctx{"err": err})
 			return
 		}
 
 		err = os.RemoveAll(state.OS.GlobalDatabaseDir())
 		if err != nil {
-			logger.Error("Failed to remove raft data after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to remove raft data after cluster join error", logger.Ctx{"err": err})
 			return
 		}
 
 		gateway.networkCert = oldCert
 		err = gateway.init(false)
 		if err != nil {
-			logger.Error("Failed to re-initialize gateway after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to re-initialize gateway after cluster join error", logger.Ctx{"err": err})
 			return
 		}
 
 		_, err = cluster.EnsureSchema(state.DB.Cluster.DB(), localClusterAddress, state.OS.GlobalDatabaseDir())
 		if err != nil {
-			logger.Error("Failed to reload schema after cluster join error", logger.Ctx{"error": err})
+			logger.Error("Failed to reload schema after cluster join error", logger.Ctx{"err": err})
 			return
 		}
 	})
diff --git a/lxd/device/config/devices.go b/lxd/device/config/devices.go
index e7bb6404f111..655bb42dc375 100644
--- a/lxd/device/config/devices.go
+++ b/lxd/device/config/devices.go
@@ -97,7 +97,7 @@ func (list Devices) Contains(k string, d Device) bool {
 // Update returns the difference between two device sets (removed, added, updated devices) and a list of all
 // changed keys across all devices. Accepts a function to return which keys can be live updated, which prevents
 // them being removed and re-added if the device supports live updates of certain keys.
-func (list Devices) Update(newlist Devices, updateFields func(Device, Device) []string) (map[string]Device, map[string]Device, map[string]Device, []string) {
+func (list Devices) Update(newlist Devices, updateFields func(Device, Device) []string) (removedList Devices, addedList Devices, updatedList Devices, changedKeys []string) {
 	rmlist := map[string]Device{}
 	addlist := map[string]Device{}
 	updatelist := map[string]Device{}
diff --git a/lxd/device/device_utils_disk.go b/lxd/device/device_utils_disk.go
index 44d735f002e9..d65188a65839 100644
--- a/lxd/device/device_utils_disk.go
+++ b/lxd/device/device_utils_disk.go
@@ -21,6 +21,7 @@ import (
 	"github.com/canonical/lxd/shared"
 	"github.com/canonical/lxd/shared/osarch"
 	"github.com/canonical/lxd/shared/revert"
+	"github.com/canonical/lxd/shared/version"
 )
 
 // RBDFormatPrefix is the prefix used in disk paths to identify RBD.
@@ -424,7 +425,7 @@ func DiskVMVirtfsProxyStop(pidPath string) error {
 // Returns UnsupportedError error if the host system or instance does not support virtiosfd, returns normal error
 // type if process cannot be started for other reasons.
 // Returns revert function and listener file handle on success.
-func DiskVMVirtiofsdStart(execPath string, inst instance.Instance, socketPath string, pidPath string, logPath string, sharePath string, idmaps []idmap.IdmapEntry) (func(), net.Listener, error) {
+func DiskVMVirtiofsdStart(kernelVersion version.DottedVersion, inst instance.Instance, socketPath string, pidPath string, logPath string, sharePath string, idmaps []idmap.IdmapEntry) (func(), net.Listener, error) {
 	revert := revert.New()
 	defer revert.Fail()
 
@@ -498,7 +499,18 @@ func DiskVMVirtiofsdStart(execPath string, inst instance.Instance, socketPath st
 	defer func() { _ = unixFile.Close() }()
 
 	// Start the virtiofsd process in non-daemon mode.
-	args := []string{"--fd=3", "-o", fmt.Sprintf("source=%s", sharePath)}
+	args := []string{
+		"--fd=3",
+		"-o", fmt.Sprintf("source=%s", sharePath),
+	}
+
+	// Virtiofsd defaults to namespace sandbox mode which requires pidfd_open support.
+	// This was added in Linux 5.3, so if running an earlier kernel fall back to chroot sandbox mode.
+	minVer, _ := version.NewDottedVersion("5.3.0")
+	if kernelVersion.Compare(minVer) < 0 {
+		args = append(args, "--sandbox=chroot")
+	}
+
 	proc, err := subprocess.NewProcess(cmd, args, logPath, logPath)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index b3f0fd6bae6a..820828455c85 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -210,7 +210,7 @@ func (d *disk) validateConfig(instConf instance.ConfigReader) error {
 	}
 
 	if d.config["source"] == "" && d.config["path"] != "/" {
-		return fmt.Errorf(`Disk entry is missing the required "source" or "path" property`)
+		return fmt.Errorf(`Non root disk devices require the "source" property`)
 	}
 
 	if d.config["path"] == "/" && d.config["source"] != "" {
@@ -898,7 +898,7 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) {
 		logPath := filepath.Join(d.inst.LogPath(), fmt.Sprintf("disk.%s.log", d.name))
 		_ = os.Remove(logPath) // Remove old log if needed.
-		revertFunc, unixListener, err := DiskVMVirtiofsdStart(d.state.OS.ExecPath, d.inst, sockPath, pidPath, logPath, mount.DevPath, rawIDMaps)
+		revertFunc, unixListener, err := DiskVMVirtiofsdStart(d.state.OS.KernelVersion, d.inst, sockPath, pidPath, logPath, mount.DevPath, rawIDMaps)
 		if err != nil {
 			var errUnsupported UnsupportedError
 			if errors.As(err, &errUnsupported) {
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index ce0a5f00b6d5..3186eccbe17c 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -294,18 +294,27 @@ func devLxdAPI(d *Daemon, f hoistFunc) http.Handler {
  */
 var pidMapper = ConnPidMapper{m: map[*net.UnixConn]*unix.Ucred{}}
 
+// ConnPidMapper is a threadsafe cache of unix connections to process IDs. We use this in hoistReq to determine
+// the instance that the connection has been made from.
 type ConnPidMapper struct {
 	m     map[*net.UnixConn]*unix.Ucred
 	mLock sync.Mutex
 }
 
+// ConnStateHandler is used in the `ConnState` field of the devlxd http.Server so that we can cache the process ID of the
+// caller when a new connection is made and delete it when the connection is closed.
 func (m *ConnPidMapper) ConnStateHandler(conn net.Conn, state http.ConnState) {
-	unixConn := conn.(*net.UnixConn)
+	unixConn, _ := conn.(*net.UnixConn)
+	if unixConn == nil {
+		logger.Error("Invalid type for devlxd connection", logger.Ctx{"conn_type": fmt.Sprintf("%T", conn)})
+		return
+	}
+
 	switch state {
 	case http.StateNew:
 		cred, err := ucred.GetCred(unixConn)
 		if err != nil {
-			logger.Debugf("Error getting ucred for conn %s", err)
+			logger.Debug("Error getting ucred for devlxd connection", logger.Ctx{"err": err})
 		} else {
 			m.mLock.Lock()
 			m.m[unixConn] = cred
@@ -333,7 +342,7 @@ func (m *ConnPidMapper) ConnStateHandler(conn net.Conn, state http.ConnState) {
 		delete(m.m, unixConn)
 		m.mLock.Unlock()
 	default:
-		logger.Debugf("Unknown state for connection %s", state)
+		logger.Debug("Unknown state for devlxd connection", logger.Ctx{"state": state.String()})
 	}
 }
 
@@ -386,7 +395,9 @@ func findContainerForPid(pid int32, s *state.State) (instance.Container, error)
 			return nil, fmt.Errorf("Instance is not container type")
 		}
 
-		return inst.(instance.Container), nil
+		// Explicitly ignore type assertion check. We've just checked that it's a container.
+		c, _ := inst.(instance.Container)
+		return c, nil
 	}
 
 	status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
@@ -439,7 +450,9 @@ func findContainerForPid(pid int32, s *state.State) (instance.Container, error)
 		}
 
 		if origPidNs == pidNs {
-			return inst.(instance.Container), nil
+			// Explicitly ignore type assertion check. The instance must be a container if we've found it via the process ID.
+			c, _ := inst.(instance.Container)
+			return c, nil
 		}
 	}
 
diff --git a/lxd/firewall/drivers/drivers_xtables.go b/lxd/firewall/drivers/drivers_xtables.go
index 3bd255a5fbcd..75e517528ab6 100644
--- a/lxd/firewall/drivers/drivers_xtables.go
+++ b/lxd/firewall/drivers/drivers_xtables.go
@@ -1548,7 +1548,7 @@ func (d Xtables) NetworkApplyForwards(networkName string, rules []AddressForward
 	reverter.Add(func() {
 		err := clearNetworkForwards()
 		if err != nil {
-			logger.Error("Failed to clear firewall rules after failing to apply network forwards", logger.Ctx{"network_name": networkName, "error": err})
+			logger.Error("Failed to clear firewall rules after failing to apply network forwards", logger.Ctx{"network_name": networkName, "err": err})
 		}
 	})
 
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index c8d3a11d687f..ed352ea11009 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -1357,7 +1357,7 @@ func (d *qemu) start(stateful bool, op *operationlock.InstanceOperation) error {
 	// This is used by the lxd-agent in preference to 9p (due to its improved performance) and in scenarios
 	// where 9p isn't available in the VM guest OS.
 	configSockPath, configPIDPath := d.configVirtiofsdPaths()
-	revertFunc, unixListener, err := device.DiskVMVirtiofsdStart(d.state.OS.ExecPath, d, configSockPath, configPIDPath, "", configMntPath, nil)
+	revertFunc, unixListener, err := device.DiskVMVirtiofsdStart(d.state.OS.KernelVersion, d, configSockPath, configPIDPath, "", configMntPath, nil)
 	if err != nil {
 		var errUnsupported device.UnsupportedError
 		if !errors.As(err, &errUnsupported) {
diff --git a/lxd/migrate.go b/lxd/migrate.go
index cd76b9ee3fee..4c32ee876afa 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -144,6 +144,8 @@ type migrationSourceWs struct {
 	pushSecrets map[string]string
 }
 
+// Metadata returns a map where each key is a connection name and each value is
+// the secret of the corresponding websocket connection.
 func (s *migrationSourceWs) Metadata() any {
 	secrets := make(shared.Jmap, len(s.conns))
 	for connName, conn := range s.conns {
@@ -153,6 +155,9 @@
 	return secrets
 }
 
+// Connect handles an incoming HTTP request to establish a websocket connection for migration.
+// It verifies the provided secret and matches it to the appropriate connection. If the secret
+// is valid, it accepts the incoming connection. Otherwise, it returns an error.
 func (s *migrationSourceWs) Connect(op *operations.Operation, r *http.Request, w http.ResponseWriter) error {
 	incomingSecret := r.FormValue("secret")
 	if incomingSecret == "" {
@@ -186,7 +191,7 @@ type migrationSink struct {
 	refresh bool
 }
 
-// MigrationSinkArgs arguments to configure migration sink.
+// migrationSinkArgs arguments to configure migration sink.
 type migrationSinkArgs struct {
 	// General migration fields
 	Dialer *websocket.Dialer
diff --git a/lxd/migrate_instance.go b/lxd/migrate_instance.go
index 48b8fcc0a5c3..37ff44d035d8 100644
--- a/lxd/migrate_instance.go
+++ b/lxd/migrate_instance.go
@@ -81,6 +81,9 @@ func newMigrationSource(inst instance.Instance, stateful bool, instanceOnly bool
 	return &ret, nil
 }
 
+// Do performs the migration operation on the source side for the given state and
+// operation. It sets up the necessary websocket connections for control, state,
+// and filesystem, and then initiates the migration process.
 func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operation) error {
 	l := logger.AddContext(logger.Ctx{"project": s.instance.Project().Name, "instance": s.instance.Name(), "live": s.live, "clusterMoveSourceName": s.clusterMoveSourceName, "push": s.pushOperationURL != ""})
 
@@ -206,6 +209,9 @@ func newMigrationSink(args *migrationSinkArgs) (*migrationSink, error) {
 	return &sink, nil
 }
 
+// Do performs the migration operation on the target side (sink) for the given
+// state and instance operation. It sets up the necessary websocket connections
+// for control, state, and filesystem, and then receives the migration data.
 func (c *migrationSink) Do(state *state.State, instOp *operationlock.InstanceOperation) error {
 	l := logger.AddContext(logger.Ctx{"project": c.instance.Project().Name, "instance": c.instance.Name(), "live": c.live, "clusterMoveSourceName": c.clusterMoveSourceName, "push": c.push})
 
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 7876612c17f8..424ac2a92d75 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -65,6 +65,9 @@ func newStorageMigrationSource(volumeOnly bool, pushTarget *api.StorageVolumePos
 	return &ret, nil
 }
 
+// DoStorage handles the migration of a storage volume from the source to the target.
+// It waits for migration connections, negotiates migration types, and initiates
+// the volume transfer.
 func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, poolName string, volName string, migrateOp *operations.Operation) error {
 	l := logger.AddContext(logger.Ctx{"project": projectName, "pool": poolName, "volume": volName, "push": s.pushOperationURL != ""})
 
@@ -238,6 +241,8 @@ func newStorageMigrationSink(args *migrationSinkArgs) (*migrationSink, error) {
 	return &sink, nil
 }
 
+// DoStorage handles the storage volume migration on the target side. It waits for
+// migration connections, negotiates migration types, and initiates the volume reception.
 func (c *migrationSink) DoStorage(state *state.State, projectName string, poolName string, req *api.StorageVolumesPost, op *operations.Operation) error {
 	l := logger.AddContext(logger.Ctx{"project": projectName, "pool": poolName, "volume": req.Name, "push": c.push})
 
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index cd0a6a0e4d42..cf08f700a86d 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -2042,10 +2042,10 @@ func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io
 	// This way if the volume being received is larger than the pool default size, the block volume created
 	// will still be able to accommodate it.
 	if args.VolumeSize > 0 && contentType == drivers.ContentTypeBlock {
-		b.logger.Debug("Setting volume size from offer header", logger.Ctx{"size": args.VolumeSize})
+		l.Debug("Setting volume size from offer header", logger.Ctx{"size": args.VolumeSize})
 		args.Config["size"] = fmt.Sprintf("%d", args.VolumeSize)
 	} else if args.Config["size"] != "" {
-		b.logger.Debug("Using volume size from root disk config", logger.Ctx{"size": args.Config["size"]})
+		l.Debug("Using volume size from root disk config", logger.Ctx{"size": args.Config["size"]})
 	}
 
 	var preFiller drivers.VolumeFiller
diff --git a/lxd/util/http.go b/lxd/util/http.go
index 787c746709e4..e8fc0a7f560e 100644
--- a/lxd/util/http.go
+++ b/lxd/util/http.go
@@ -200,7 +200,7 @@ func CheckCASignature(cert x509.Certificate, networkCert *shared.CertInfo) (trus
 
 	err = crl.CheckSignatureFrom(ca)
 	if err != nil {
-		logger.Error("Certificate revokation list has not been signed by server CA", logger.Ctx{"error": err})
+		logger.Error("Certificate revokation list has not been signed by server CA", logger.Ctx{"err": err})
 		return false, false, ""
 	}
 
diff --git a/shared/util_linux.go b/shared/util_linux.go
index f1b8ac40d433..17288ac5e353 100644
--- a/shared/util_linux.go
+++ b/shared/util_linux.go
@@ -25,11 +25,13 @@ import (
 
 // --- pure Go functions ---
 
+// GetFileStat retrieves the UID, GID, major and minor device numbers, inode, and number of hard links for
+// the given file path.
 func GetFileStat(p string) (uid int, gid int, major uint32, minor uint32, inode uint64, nlink int, err error) {
 	var stat unix.Stat_t
 	err = unix.Lstat(p, &stat)
 	if err != nil {
-		return
+		return 0, 0, 0, 0, 0, 0, err
 	}
 
 	uid = int(stat.Uid)
@@ -41,7 +43,7 @@ func GetFileStat(p string) (uid int, gid int, major uint32, minor uint32, inode
 		minor = unix.Minor(uint64(stat.Rdev))
 	}
 
-	return
+	return uid, gid, major, minor, inode, nlink, nil
 }
 
 // GetPathMode returns a os.FileMode for the provided path.
@@ -55,6 +57,7 @@ func GetPathMode(path string) (os.FileMode, error) {
 	return mode, nil
 }
 
+// SetSize sets the terminal size to the specified width and height for the given file descriptor.
 func SetSize(fd int, width int, height int) (err error) {
 	var dimensions [4]uint16
 	dimensions[0] = uint16(height)
@@ -94,8 +97,10 @@ func GetAllXattr(path string) (map[string]string, error) {
 	return xattrs, nil
 }
 
-var ObjectFound = fmt.Errorf("Found requested object")
+// ErrObjectFound indicates that the requested object was found.
+var ErrObjectFound = fmt.Errorf("Found requested object")
 
+// LookupUUIDByBlockDevPath finds and returns the UUID of a block device by its path.
 func LookupUUIDByBlockDevPath(diskDevice string) (string, error) {
 	uuid := ""
 	readUUID := func(path string, info os.FileInfo, err error) error {
@@ -117,14 +122,14 @@ func LookupUUIDByBlockDevPath(diskDevice string) (string, error) {
 				uuid = path
 				// Will allows us to avoid needlessly travers
 				// the whole directory.
-				return ObjectFound
+				return ErrObjectFound
 			}
 		}
 
 		return nil
 	}
 
 	err := filepath.Walk("/dev/disk/by-uuid", readUUID)
-	if err != nil && err != ObjectFound {
+	if err != nil && err != ErrObjectFound {
 		return "", fmt.Errorf("Failed to detect UUID: %s", err)
 	}
 
@@ -136,7 +141,9 @@ func LookupUUIDByBlockDevPath(diskDevice string) (string, error) {
 	return uuid[lastSlash+1:], nil
 }
 
-// Detect whether err is an errno.
+// GetErrno detects whether the error is an errno.
+//
+//revive:disable:error-return Error is returned first because this is similar to assertion.
 func GetErrno(err error) (errno error, iserrno bool) {
 	sysErr, ok := err.(*os.SyscallError)
 	if ok {
@@ -220,10 +227,12 @@ func intArrayToString(arr any) string {
 	return s
 }
 
+// DeviceTotalMemory returns the total memory of the device by reading /proc/meminfo.
 func DeviceTotalMemory() (int64, error) {
 	return GetMeminfo("MemTotal")
 }
 
+// GetMeminfo retrieves the memory information for the specified field from /proc/meminfo.
 func GetMeminfo(field string) (int64, error) {
 	// Open /proc/meminfo
 	f, err := os.Open("/proc/meminfo")
@@ -260,7 +269,7 @@ func GetMeminfo(field string) (int64, error) {
 }
 
 // OpenPtyInDevpts creates a new PTS pair, configures them and returns them.
-func OpenPtyInDevpts(devpts_fd int, uid, gid int64) (*os.File, *os.File, error) {
+func OpenPtyInDevpts(devptsFD int, uid, gid int64) (*os.File, *os.File, error) {
 	revert := revert.New()
 	defer revert.Fail()
 	var fd int
@@ -268,8 +277,8 @@ func OpenPtyInDevpts(devpts_fd int, uid, gid int64) (*os.File, *os.File, error)
 	var err error
 
 	// Create a PTS pair.
-	if devpts_fd >= 0 {
-		fd, err = unix.Openat(devpts_fd, "ptmx", unix.O_RDWR|unix.O_CLOEXEC|unix.O_NOCTTY, 0)
+	if devptsFD >= 0 {
+		fd, err = unix.Openat(devptsFD, "ptmx", unix.O_RDWR|unix.O_CLOEXEC|unix.O_NOCTTY, 0)
 	} else {
 		fd, err = unix.Openat(-1, "/dev/ptmx", unix.O_RDWR|unix.O_CLOEXEC|unix.O_NOCTTY, 0)
 	}
@@ -301,7 +310,7 @@ func OpenPtyInDevpts(devpts_fd int, uid, gid int64) (*os.File, *os.File, error)
 
 		pty = os.NewFile(ptyFd, fmt.Sprintf("/dev/pts/%d", id))
 	} else {
-		if devpts_fd >= 0 {
+		if devptsFD >= 0 {
 			return nil, nil, fmt.Errorf("TIOCGPTPEER required but not available")
 		}
 
@@ -401,7 +410,7 @@ func ExitStatus(err error) (int, error) {
 }
 
 // GetPollRevents poll for events on provided fd.
-func GetPollRevents(fd int, timeout int, flags int) (int, int, error) {
+func GetPollRevents(fd int, timeout int, flags int) (n int, revents int, err error) {
 	pollFd := unix.PollFd{
 		Fd:     int32(fd),
 		Events: int16(flags),
@@ -411,7 +420,7 @@ func GetPollRevents(fd int, timeout int, flags int) (int, int, error) {
 	pollFds := []unix.PollFd{pollFd}
 
 again:
-	n, err := unix.Poll(pollFds, timeout)
+	n, err = unix.Poll(pollFds, timeout)
 	if err != nil {
 		if err == unix.EAGAIN || err == unix.EINTR {
 			goto again
@@ -498,10 +507,12 @@ func (w *execWrapper) Read(p []byte) (int, error) {
 	return n, opErr
 }
 
+// Write writes data to the underlying os.File.
 func (w *execWrapper) Write(p []byte) (int, error) {
 	return w.f.Write(p)
 }
 
+// Close closes the underlying os.File.
 func (w *execWrapper) Close() error {
 	return w.f.Close()
 }
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index f6a73e216a9c..03dda5b1231f 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -37,7 +37,7 @@ spawn_lxd() {
         LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
         # shellcheck disable=SC2153
-        pid="$(cat "${TEST_DIR}/ns/${LXD_NETNS}/PID")"
+        read -r pid < "${TEST_DIR}/ns/${LXD_NETNS}/PID"
         LXD_DIR="${lxddir}" nsenter -n -m -t "${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
@@ -96,7 +96,7 @@ respawn_lxd() {
    if [ "${LXD_NETNS}" = "" ]; then
        LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
    else
-       pid="$(cat "${TEST_DIR}/ns/${LXD_NETNS}/PID")"
+       read -r pid < "${TEST_DIR}/ns/${LXD_NETNS}/PID"
        LXD_DIR="${lxddir}" nsenter -n -m -t "${pid}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
    fi
    LXD_PID=$!
diff --git a/test/includes/storage.sh b/test/includes/storage.sh
index 5189223c1d56..cd65a7327c22 100644
--- a/test/includes/storage.sh
+++ b/test/includes/storage.sh
@@ -24,7 +24,7 @@ random_storage_backend() {
 
 # Return the storage backend being used by a LXD instance
 storage_backend() {
-    cat "$1/lxd.backend"
+    read -r backend < "$1/lxd.backend" && echo "${backend}"
 }
 
 # Return a list of available storage backends
diff --git a/test/lint/i18n-up-to-date.sh b/test/lint/i18n-up-to-date.sh
index 4af412c953d3..d45c8d7cc4da 100755
--- a/test/lint/i18n-up-to-date.sh
+++ b/test/lint/i18n-up-to-date.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 safe_pot_hash() {
   sed -e "/Project-Id-Version/,/Content-Transfer-Encoding/d" -e "/^#/d" "po/lxd.pot" | md5sum | cut -f1 -d" "
diff --git a/test/lint/mixed-whitespace.sh b/test/lint/mixed-whitespace.sh
index 8d329561d949..1f950bf32d29 100755
--- a/test/lint/mixed-whitespace.sh
+++ b/test/lint/mixed-whitespace.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking for mixed tabs and spaces in shell scripts..."
diff --git a/test/lint/negated-is-bool.sh b/test/lint/negated-is-bool.sh
index f70d27e1063b..8aeec354966b 100755
--- a/test/lint/negated-is-bool.sh
+++ b/test/lint/negated-is-bool.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking usage of negated shared.Is(True|False)*() functions..."
diff --git a/test/lint/newline-after-block.sh b/test/lint/newline-after-block.sh
index 21972bc50d6c..f37020a79a73 100755
--- a/test/lint/newline-after-block.sh
+++ b/test/lint/newline-after-block.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking that functional blocks are followed by newlines..."
diff --git a/test/lint/no-oneline-assign-and-test.sh b/test/lint/no-oneline-assign-and-test.sh
index c30937d09aec..d180a955a4d2 100755
--- a/test/lint/no-oneline-assign-and-test.sh
+++ b/test/lint/no-oneline-assign-and-test.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking for oneline assign & test..."
diff --git a/test/lint/no-short-form-imports.sh b/test/lint/no-short-form-imports.sh
index 5bf4688d8d8c..9cfbca56f375 100755
--- a/test/lint/no-short-form-imports.sh
+++ b/test/lint/no-short-form-imports.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking for short form imports..."
diff --git a/test/lint/rest-api-up-to-date.sh b/test/lint/rest-api-up-to-date.sh
index f52fd54a5b10..bf3c5874b0e2 100755
--- a/test/lint/rest-api-up-to-date.sh
+++ b/test/lint/rest-api-up-to-date.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 f="doc/rest-api.yaml"
diff --git a/test/lint/trailing-space.sh b/test/lint/trailing-space.sh
index fa44e1c06955..b902fbb645c7 100755
--- a/test/lint/trailing-space.sh
+++ b/test/lint/trailing-space.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 
 echo "Checking that there are no trailing spaces in shell scripts..."
diff --git a/test/main.sh b/test/main.sh
index 3b5b7acaa3c0..d25b9f284a1c 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 [ -n "${GOPATH:-}" ] && export "PATH=${GOPATH}/bin:${PATH}"
 
 # Don't translate lxc output for parsing in it in tests.
@@ -278,6 +279,7 @@ if [ "${1:-"all"}" != "cluster" ]; then
   run_test test_container_devices_nic_ipvlan "container devices - nic - ipvlan"
   run_test test_container_devices_nic_sriov "container devices - nic - sriov"
   run_test test_container_devices_nic_routed "container devices - nic - routed"
+  run_test test_container_devices_none "container devices - none"
   run_test test_container_devices_infiniband_physical "container devices - infiniband - physical"
   run_test test_container_devices_infiniband_sriov "container devices - infiniband - sriov"
   run_test test_container_devices_proxy "container devices - proxy"
diff --git a/test/perf.sh b/test/perf.sh
index 2aaa4764a924..a0511c2a3e04 100755
--- a/test/perf.sh
+++ b/test/perf.sh
@@ -1,4 +1,5 @@
-#!/bin/sh -eu
+#!/bin/bash
+set -eu
 #
 # Performance tests runner
 #
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 4b9905f69a33..d939f242eeb9 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -415,13 +415,13 @@ test_backup_import_with_project() {
   lxc export c3 "${LXD_DIR}/c3.tar.gz"
 
   # Remove container and storage pool
-  lxc rm -f c3
+  lxc delete -f c3
   lxc storage delete pool_1
 
   # This should succeed as it will fall back on the default pool
   lxc import "${LXD_DIR}/c3.tar.gz"
 
-  lxc rm -f c3
+  lxc delete -f c3
 
   # Remove root device
   lxc profile device remove default root
@@ -435,7 +435,7 @@
   # Specify pool explicitly
   lxc import "${LXD_DIR}/c3.tar.gz" -s pool_2
 
-  lxc rm -f c3
+  lxc delete -f c3
 
   # Reset default storage pool
   lxc profile device add default root disk path=/ pool="${default_pool}"
@@ -487,6 +487,7 @@ test_backup_export_with_project() {
   lxc export c1 "${LXD_DIR}/c1-optimized.tar.gz" --optimized-storage --instance-only
   tar -xzf "${LXD_DIR}/c1-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
+  ls -l "${LXD_DIR}/optimized/backup/"
   [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
   [ -f "${LXD_DIR}/optimized/backup/container.bin" ]
   [ ! -d "${LXD_DIR}/optimized/backup/snapshots" ]
@@ -496,6 +497,7 @@
   tar -xzf "${LXD_DIR}/c1.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # check tarball content
+  ls -l "${LXD_DIR}/non-optimized/backup/"
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]
   [ -d "${LXD_DIR}/non-optimized/backup/container" ]
   [ ! -d "${LXD_DIR}/non-optimized/backup/snapshots" ]
@@ -508,6 +510,7 @@
   lxc export c1 "${LXD_DIR}/c1-optimized.tar.gz" --optimized-storage
   tar -xzf "${LXD_DIR}/c1-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
+  ls -l "${LXD_DIR}/optimized/backup/"
   [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
   [ -f "${LXD_DIR}/optimized/backup/container.bin" ]
   [ -f "${LXD_DIR}/optimized/backup/snapshots/snap0.bin" ]
@@ -517,6 +520,7 @@
   tar -xzf "${LXD_DIR}/c1.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # check tarball content
+  ls -l "${LXD_DIR}/non-optimized/backup/"
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]
   [ -d "${LXD_DIR}/non-optimized/backup/container" ]
   [ -d "${LXD_DIR}/non-optimized/backup/snapshots/snap0" ]
@@ -651,6 +655,7 @@ test_backup_volume_export_with_project() {
   # Extract backup tarball.
   tar -xzf "${LXD_DIR}/testvol-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
+  ls -l "${LXD_DIR}/optimized/backup/"
   [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
   [ -f "${LXD_DIR}/optimized/backup/volume.bin" ]
   [ ! -d "${LXD_DIR}/optimized/backup/volume-snapshots" ]
@@ -665,6 +670,7 @@
   tar -xzf "${LXD_DIR}/testvol.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # Check tarball content.
+  ls -l "${LXD_DIR}/non-optimized/backup/"
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]
   [ -d "${LXD_DIR}/non-optimized/backup/volume" ]
   [ "$(cat "${LXD_DIR}/non-optimized/backup/volume/test")" = "bar" ]
@@ -684,6 +690,7 @@
   # Extract backup tarball.
   tar -xzf "${LXD_DIR}/testvol-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
+  ls -l "${LXD_DIR}/optimized/backup/"
   [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
   [ -f "${LXD_DIR}/optimized/backup/volume.bin" ]
   [ -f "${LXD_DIR}/optimized/backup/volume-snapshots/test-snap0.bin" ]
@@ -698,6 +705,7 @@
   tar -xzf "${LXD_DIR}/testvol.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # Check tarball content.
+  ls -l "${LXD_DIR}/non-optimized/backup/"
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]
   [ -d "${LXD_DIR}/non-optimized/backup/volume" ]
   [ "$(cat "${LXD_DIR}/non-optimized/backup/volume/test")" = "bar" ]
@@ -770,7 +778,7 @@
   lxc storage volume detach "${custom_vol_pool}" testvol2 c1
   lxc storage volume rm "${custom_vol_pool}" testvol
   lxc storage volume rm "${custom_vol_pool}" testvol2
-  lxc rm -f c1
+  lxc delete -f c1
 
   rmdir "${LXD_DIR}/optimized"
   rmdir "${LXD_DIR}/non-optimized"
@@ -922,7 +930,7 @@
 yes
 EOF
 
   # Remove recovered instance.
-  lxc rm -f c2
+  lxc delete -f c2
 )
 }
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index e3f5c8d8b674..b27b65e83c40 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -2111,12 +2111,12 @@ test_clustering_image_replication() {
 }
 
 test_clustering_dns() {
-  # shellcheck disable=2039,3043
-  local LXD_DIR
+  local lxdDir
 
   # Because we do not want tests to only run on Ubuntu (due to cluster's fan network dependency)
   # instead we will just spawn forkdns directly and check DNS resolution.
+  # XXX: make a copy of the global LXD_DIR
   # shellcheck disable=SC2031
   lxdDir="${LXD_DIR}"
   prefix="lxd$$"
diff --git a/test/suites/container_devices_none.sh b/test/suites/container_devices_none.sh
new file mode 100644
index 000000000000..2325a016a265
--- /dev/null
+++ b/test/suites/container_devices_none.sh
@@ -0,0 +1,20 @@
+test_container_devices_none() {
+  ensure_import_testimage
+  ensure_has_localhost_remote "${LXD_ADDR}"
+  ctName="ct$$"
+  lxc launch testimage "${ctName}"
+
+  # Check eth0 interface exists.
+  lxc exec "${ctName}" -- stat /sys/class/net/eth0
+
+  # Add none device to remove eth0 interface (simulating network disruption).
+  lxc config device add "${ctName}" eth0 none
+  ! lxc exec "${ctName}" -- stat /sys/class/net/eth0 || false
+
+  # Remove device and check eth0 interface is added back.
+  lxc config device rm "${ctName}" eth0
+  lxc exec "${ctName}" -- stat /sys/class/net/eth0
+
+  # Clean up
+  lxc rm -f "${ctName}"
+}
diff --git a/test/suites/projects.sh b/test/suites/projects.sh
index c7b211d65790..5686d92ea554 100644
--- a/test/suites/projects.sh
+++ b/test/suites/projects.sh
@@ -20,6 +20,10 @@ test_projects_crud() {
   lxc project show foo | grep -q 'features.images: "true"'
   lxc project get foo "features.profiles" | grep -q 'true'
 
+  # Set a limit
+  lxc project set foo limits.containers 10
+  lxc project show foo | grep -q 'limits.containers: "10"'
+
   # Trying to create a project with the same name fails
   ! lxc project create foo || false
@@ -37,6 +41,13 @@ test_projects_crud() {
   lxc project show bar| sed 's/^description:.*/description: "Bar project"/' | lxc project edit bar
   lxc project show bar | grep -q "description: Bar project"
 
+  # Edit the project config via PATCH. Existing key/value pairs should remain or be updated.
+  lxc query -X PATCH -d '{\"config\" : {\"limits.memory\":\"5GiB\",\"features.images\":\"false\"}}' /1.0/projects/bar
+  lxc project show bar | grep -q 'limits.memory: 5GiB'
+  lxc project show bar | grep -q 'features.images: "false"'
+  lxc project show bar | grep -q 'features.profiles: "true"'
+  lxc project show bar | grep -q 'limits.containers: "10"'
+
   # Create a second project
   lxc project create foo
diff --git a/test/suites/remote.sh b/test/suites/remote.sh
index 8637abb8271f..ade7aca3564c 100644
--- a/test/suites/remote.sh
+++ b/test/suites/remote.sh
@@ -109,8 +109,8 @@ test_remote_url_with_token() {
 
   lxc config trust rm "$(lxc config trust list -f json | jq -r '.[].fingerprint')"
 
-  # Set token expiry to 5 seconds
-  lxc config set core.remote_token_expiry 5S
+  # Set token expiry to 1 second
+  lxc config set core.remote_token_expiry 1S
 
   # Generate new token
   token="$(lxc config trust add --name foo | tail -n1)"
@@ -128,7 +128,7 @@
   token="$(lxc config trust add --name foo | tail -n1)"
 
   # This will cause the token to expire
-  sleep 5
+  sleep 2
 
   # Try adding remote. This should fail.
   ! lxc_remote remote add test "${token}" || false
diff --git a/test/suites/security.sh b/test/suites/security.sh
index 7829f24bd817..a18bf4f15f3c 100644
--- a/test/suites/security.sh
+++ b/test/suites/security.sh
@@ -24,14 +24,14 @@ test_security() {
   lxc launch testimage test-priv -c security.privileged=true
 
   PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-priv")
-  UID=$(stat -L -c %u "${LXD_DIR}/containers/test-priv")
+  FUID=$(stat -L -c %u "${LXD_DIR}/containers/test-priv")
 
   if [ "${PERM}" != "100" ]; then
     echo "Bad container permissions: ${PERM}"
     false
   fi
 
-  if [ "${UID}" != "0" ]; then
-    echo "Bad container owner: ${UID}"
+  if [ "${FUID}" != "0" ]; then
+    echo "Bad container owner: ${FUID}"
     false
   fi
@@ -41,14 +41,14 @@
   lxc restart test-priv --force
 
   PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-priv")
-  UID=$(stat -L -c %u "${LXD_DIR}/containers/test-priv")
+  FUID=$(stat -L -c %u "${LXD_DIR}/containers/test-priv")
 
   if [ "${PERM}" != "100" ]; then
     echo "Bad container permissions: ${PERM}"
     false
   fi
 
-  if [ "${UID}" != "0" ]; then
-    echo "Bad container owner: ${UID}"
+  if [ "${FUID}" != "0" ]; then
+    echo "Bad container owner: ${FUID}"
     false
   fi
@@ -59,14 +59,14 @@
   lxc restart test-unpriv --force
 
   PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-unpriv")
-  UID=$(stat -L -c %u "${LXD_DIR}/containers/test-unpriv")
+  FUID=$(stat -L -c %u "${LXD_DIR}/containers/test-unpriv")
 
   if [ "${PERM}" != "100" ]; then
     echo "Bad container permissions: ${PERM}"
     false
   fi
 
-  if [ "${UID}" != "0" ]; then
-    echo "Bad container owner: ${UID}"
+  if [ "${FUID}" != "0" ]; then
+    echo "Bad container owner: ${FUID}"
     false
   fi
@@ -74,14 +74,14 @@
   lxc restart test-unpriv --force
 
   PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-unpriv")
-  UID=$(stat -L -c %u "${LXD_DIR}/containers/test-unpriv")
+  FUID=$(stat -L -c %u "${LXD_DIR}/containers/test-unpriv")
 
   if [ "${PERM}" != "100" ]; then
     echo "Bad container permissions: ${PERM}"
     false
   fi
 
-  if [ "${UID}" = "0" ]; then
-    echo "Bad container owner: ${UID}"
+  if [ "${FUID}" = "0" ]; then
+    echo "Bad container owner: ${FUID}"
     false
   fi
diff --git a/test/suites/storage_driver_dir.sh b/test/suites/storage_driver_dir.sh
index a4a4440ffbdb..a54939aa088f 100644
--- a/test/suites/storage_driver_dir.sh
+++ b/test/suites/storage_driver_dir.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 test_storage_driver_dir() {
   do_dir_on_empty_fs