From 038a0dd8d84a269df503616406735dd3ce37a8b7 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Tue, 12 Mar 2024 16:31:58 +0300 Subject: [PATCH] Some fixes after PR review Signed-off-by: Aleksandr Zimin --- .github/workflows/build_dev.yml | 2 + .github/workflows/build_prod.yml | 2 + .werf/images.yaml | 2 + images/agent/Dockerfile | 16 ++- images/agent/config/config.go | 3 +- images/agent/internal/const.go | 3 + .../controller/lvm_volume_group_watcher.go | 64 +++++----- .../lvm_volume_group_watcher_func.go | 109 ++++++------------ .../lvm_volume_group_watcher_test.go | 23 ++-- images/agent/pkg/utils/commands.go | 71 ++++++------ images/static-utils-copier/Dockerfile | 40 ++++--- templates/agent/daemonset.yaml | 11 +- werf-giterminism.yaml | 2 +- 13 files changed, 162 insertions(+), 186 deletions(-) diff --git a/.github/workflows/build_dev.yml b/.github/workflows/build_dev.yml index 5a690d97..877bb804 100644 --- a/.github/workflows/build_dev.yml +++ b/.github/workflows/build_dev.yml @@ -7,6 +7,8 @@ env: MODULES_MODULE_SOURCE: ${{ vars.DEV_MODULE_SOURCE }} MODULES_REGISTRY_LOGIN: ${{ vars.DEV_MODULES_REGISTRY_LOGIN }} MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} + # for ex https://user:password@my-repo.com/group + SOURCE_REPO: "${{ secrets.SOURCE_REPO }}" on: pull_request: diff --git a/.github/workflows/build_prod.yml b/.github/workflows/build_prod.yml index 827c38a1..f159845c 100644 --- a/.github/workflows/build_prod.yml +++ b/.github/workflows/build_prod.yml @@ -8,6 +8,8 @@ env: MODULES_REGISTRY_LOGIN: ${{ vars.PROD_MODULES_REGISTRY_LOGIN }} MODULES_REGISTRY_PASSWORD: ${{ secrets.PROD_MODULES_REGISTRY_PASSWORD }} MODULES_MODULE_TAG: ${{ github.ref_name }} + # for ex https://user:password@my-repo.com/group + SOURCE_REPO: "${{ secrets.SOURCE_REPO }}" on: push: diff --git a/.werf/images.yaml b/.werf/images.yaml index 47b93856..0908d90a 100644 --- a/.werf/images.yaml +++ b/.werf/images.yaml @@ -8,6 +8,8 @@ image: images/{{ $ctx.ImageName }} 
context: images/{{ $ctx.ImageName }} dockerfile: Dockerfile +args: + SOURCE_REPO: {{ env "SOURCE_REPO" | default "https://github.com" }} {{- /* For werf.inc.yaml render content by providing the ImageName param. */ -}} {{- else }} diff --git a/images/agent/Dockerfile b/images/agent/Dockerfile index 2121aa86..15e696e8 100644 --- a/images/agent/Dockerfile +++ b/images/agent/Dockerfile @@ -4,6 +4,9 @@ ARG BASE_IMAGE=registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205 ################################# FROM $UBUNTU_UTILS_BUILDER as util-linux-builder +ARG SOURCE_REPO +ARG UTIL_LINUX_VERSION=2.39.3 + RUN apt-get update && apt-get install -y \ build-essential \ git \ @@ -17,12 +20,13 @@ RUN apt-get update && apt-get install -y \ flex \ && rm -rf /var/lib/apt/lists/* -RUN git clone https://github.com/util-linux/util-linux.git /util-linux - WORKDIR /util-linux -RUN git checkout v2.39.3 -RUN ./autogen.sh -RUN ./configure LDFLAGS="-static" --enable-static-programs -disable-shared + +RUN git clone ${SOURCE_REPO}/util-linux/util-linux.git . 
&& \ + git checkout v${UTIL_LINUX_VERSION} && \ + ./autogen.sh && \ + ./configure LDFLAGS="-static" --enable-static-programs -disable-shared + RUN make LDFLAGS="--static" nsenter ################################# @@ -42,7 +46,7 @@ RUN GOOS=linux GOARCH=amd64 go build -o sds-node-configurator-agent ################################# FROM --platform=linux/amd64 $BASE_IMAGE -COPY --from=util-linux-builder /util-linux/nsenter /usr/local/bin/flant/nsenter.static +COPY --from=util-linux-builder /util-linux/nsenter /opt/deckhouse/sds/nsenter.static COPY --from=agent-builder /go/src/cmd/sds-node-configurator-agent /go/src/cmd/sds-node-configurator-agent CMD ["/go/src/cmd/sds-node-configurator-agent"] diff --git a/images/agent/config/config.go b/images/agent/config/config.go index 843a1af5..ccf4d7ff 100644 --- a/images/agent/config/config.go +++ b/images/agent/config/config.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "os/exec" + "sds-node-configurator/internal" "sds-node-configurator/pkg/logger" "time" ) @@ -80,7 +81,7 @@ func getMachineId() (string, error) { args := []string{"-m", "-u", "-i", "-n", "-p", "-t", "1", "cat", "/etc/machine-id"} var stdout bytes.Buffer - cmd := exec.Command("/usr/local/bin/flant/nsenter.static", args...) + cmd := exec.Command(internal.NSENTERCmd, args...) 
cmd.Stdout = &stdout err := cmd.Run() if err != nil { diff --git a/images/agent/internal/const.go b/images/agent/internal/const.go index 094673c4..ddba703f 100644 --- a/images/agent/internal/const.go +++ b/images/agent/internal/const.go @@ -29,6 +29,9 @@ const ( ResizeDelta = "32Mi" KubernetesApiRequestLimit = 5 KubernetesApiRequestTimeout = 1 + NSENTERCmd = "/opt/deckhouse/sds/nsenter.static" + LSBLKCmd = "/opt/deckhouse/sds/lsblk.static" + LVMCmd = "/opt/deckhouse/sds/lvm.static" ) var ( diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher.go b/images/agent/pkg/controller/lvm_volume_group_watcher.go index 1b991f1f..2b780fca 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher.go @@ -80,7 +80,7 @@ func RunLVMVolumeGroupWatcherController( } createFunc := func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupController] event create LVMVolumeGroup, name: %s", e.Object.GetName())) + log.Info(fmt.Sprintf("[RunLVMVolumeGroupController] Get event CREATE for resource LVMVolumeGroup, name: %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} shouldRequeue, err := ReconcileLVMVG(ctx, metrics, e.Object.GetName(), e.Object.GetNamespace(), cfg.NodeName, log, cl) @@ -92,7 +92,7 @@ func RunLVMVolumeGroupWatcherController( } updateFunc := func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update LVMVolumeGroupn, name: %s", e.ObjectNew.GetName())) + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Get event UPDATE for resource LVMVolumeGroup, name: %s", e.ObjectNew.GetName())) newLVG, ok := e.ObjectNew.(*v1alpha1.LvmVolumeGroup) if !ok { @@ -200,39 +200,32 @@ func ReconcileLVMVG( log.Error(err, 
fmt.Sprintf("[ReconcileLVMVG] error getLVMVolumeGroup, objectname: %s", objectName)) return true, err } - validation, status, err := ValidateLVMGroup(ctx, cl, metrics, lvg, objectNameSpace, nodeName) - if lvg == nil { err = errors.New("nil pointer detected") log.Error(err, "[ReconcileLVMVG] requested LVMVG group in nil") return true, err } - if status.Health == NonOperational { - health := status.Health - var message string - if err != nil { - message = err.Error() - } - - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] ValidateLVMGroup, resource name: %s, message: %s", lvg.Name, message)) - err = updateLVMVolumeGroupHealthStatus(ctx, cl, metrics, lvg.Name, lvg.Namespace, message, health) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error update LVMVolumeGroup %s", lvg.Name)) - return true, err - } - } + isOwnedByNode, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, lvg, objectNameSpace, nodeName) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] validationLVMGroup failed, resource name: %s", lvg.Name)) - return false, err + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error CheckLVMVGNodeOwnership, resource name: %s", lvg.Name)) + if status.Health == NonOperational { + health := status.Health + message := status.Message + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] CheckLVMVGNodeOwnership, resource name: %s, health: %s, phase: %s, message: %s", lvg.Name, health, status.Phase, message)) + err = updateLVMVolumeGroupHealthStatus(ctx, cl, metrics, lvg.Name, lvg.Namespace, message, health) + if err != nil { + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error update LVMVolumeGroup %s", lvg.Name)) + return true, err + } + } + return true, err } - if validation == false { - err = errors.New("resource validation failed") - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] validation failed for resource, name: %s", lvg.Name)) - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] status.Message = %s", status.Message)) - return false, err + if 
!isOwnedByNode { + log.Debug(fmt.Sprintf("[ReconcileLVMVG] resource is not owned by node, name: %s, skip it", lvg.Name)) + return false, nil } log.Info("[ReconcileLVMVG] validation passed") @@ -272,14 +265,14 @@ func ReconcileLVMVG( } log.Info(fmt.Sprintf(`[ReconcileLVMVG] event was created for resource, name: %s`, lvg.Name)) - existVG, err := ExistVG(lvg.Spec.ActualVGNameOnTheNode, log, metrics) + isVgExist, vg, err := GetVGFromNode(lvg.Spec.ActualVGNameOnTheNode, log, metrics) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ExistVG, name: %s", lvg.Spec.ActualVGNameOnTheNode)) return true, err } - if existVG { - log.Debug("[ReconcileLVMVG] tries to update ") - updated, err := UpdateLVMVolumeGroupTagsName(log, metrics, lvg) + if isVgExist { + log.Debug("[ReconcileLVMVG] start UpdateLVMVolumeGroupTagsName for r " + lvg.Name) + updated, err := UpdateLVMVolumeGroupTagsName(log, metrics, vg, lvg) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to update VG tags on VG, name: %s", lvg.Spec.ActualVGNameOnTheNode)) return true, err @@ -292,16 +285,16 @@ func ReconcileLVMVG( } log.Info("[ReconcileLVMVG] validation and choosing the type of operation") - extendPVs, shrinkPVs, err := ValidateTypeLVMGroup(ctx, cl, metrics, lvg, log) + extendPVs, shrinkPVs, err := ValidateOperationTypeLVMGroup(ctx, cl, metrics, lvg, log) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ValidateTypeLVMGroup, name: %s", lvg.Name)) + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ValidateOperationTypeLVMGroup, name: %s", lvg.Name)) return true, err } - if err == nil && extendPVs == nil && shrinkPVs == nil { - log.Warning("[ReconcileLVMVG] ValidateTypeLVMGroup FAIL") - //todo retry and send message - } + // if err == nil && extendPVs == nil && shrinkPVs == nil { + // log.Warning(fmt.Sprintf("[ReconcileLVMVG] ValidateOperationTypeLVMGroup FAIL for resource %s", lvg.Name)) + // //todo retry and send message + // } log.Debug("----- 
extendPVs list -----") for _, pvExt := range extendPVs { @@ -449,6 +442,7 @@ func ReconcileLVMVG( log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error CreateEventLVMVolumeGroup, resource name: %s", lvg.Name)) } + log.Debug("[ReconcileLVMVG] Start CreateVGComplex function for resource " + lvg.Name) err := CreateVGComplex(ctx, cl, metrics, lvg, log) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to CreateVGComplex for resource, name: %s", lvg.Name)) diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/pkg/controller/lvm_volume_group_watcher_func.go index 1b7ac562..fce5e3f4 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher_func.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher_func.go @@ -109,7 +109,7 @@ func getBlockDevice(ctx context.Context, cl client.Client, metrics monitoring.Me return obj, nil } -func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, namespace, nodeName string) (bool, *StatusLVMVolumeGroup, error) { +func CheckLVMVGNodeOwnership(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, namespace, nodeName string) (bool, *StatusLVMVolumeGroup, error) { status := StatusLVMVolumeGroup{} if lvmVolumeGroup == nil { return false, nil, errors.New("lvmVolumeGroup is nil") @@ -120,7 +120,10 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. for _, blockDev := range lvmVolumeGroup.Spec.BlockDeviceNames { device, err := getBlockDevice(ctx, cl, metrics, namespace, blockDev) if err != nil { + err = fmt.Errorf("error getBlockDevice: %s", err) status.Health = NonOperational + status.Phase = Failed + status.Message = err.Error() return false, &status, err } if device.Status.NodeName == nodeName { @@ -136,7 +139,7 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. 
status.Health = NonOperational status.Phase = Failed status.Message = "selected block devices are from different nodes for local LVMVolumeGroup" - return false, &status, errors.New("wrong block devices selected") + return false, &status, nil } if membership == 0 { @@ -144,31 +147,10 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. } } - if lvmVolumeGroup.Spec.Type == Shared { - if len(lvmVolumeGroup.Spec.BlockDeviceNames) != 1 { - status.Health = NonOperational - status.Phase = Failed - status.Message = "several block devices are selected for the shared LVMVolumeGroup" - return false, &status, errors.New(status.Message) - } - - singleBD := lvmVolumeGroup.Spec.BlockDeviceNames[0] - bd, err := getBlockDevice(ctx, cl, metrics, namespace, singleBD) - if err != nil { - status.Health = NonOperational - status.Phase = Failed - status.Message = "selected unknown block device for the shared LVMVolumeGroup" - return false, &status, err - } - - if bd.Status.NodeName == nodeName { - return true, &status, nil - } - } return false, &status, nil } -func ValidateTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, l logger.Logger) (extendPV, shrinkPV []string, err error) { +func ValidateOperationTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, l logger.Logger) (extendPV, shrinkPV []string, err error) { pvs, cmdStr, _, err := utils.GetAllPVs() l.Debug(fmt.Sprintf("GetAllPVs exec cmd: %s", cmdStr)) if err != nil { @@ -182,36 +164,27 @@ func ValidateTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitor } if dev.Status.Consumable == true { - extendPV = append(extendPV, dev.Status.Path) + isReallyConsumable := true + for _, pv := range pvs { + if pv.PVName == dev.Status.Path && pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { + isReallyConsumable = false + break + } + } + if 
isReallyConsumable { + extendPV = append(extendPV, dev.Status.Path) + } + continue } if dev.Status.ActualVGNameOnTheNode != lvmVolumeGroup.Spec.ActualVGNameOnTheNode && (len(dev.Status.VGUuid) != 0) { - return nil, nil, nil - // validation fail, send message => LVG ? + err = fmt.Errorf("block device %s is already in use by another VG: %s with uuid %s. Our VG: %s", devName, dev.Status.ActualVGNameOnTheNode, dev.Status.VGUuid, lvmVolumeGroup.Spec.ActualVGNameOnTheNode) + return nil, nil, err } + // TODO: realisation of shrinkPV } - var flag bool - - for _, pv := range pvs { - if pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { - flag = false - for _, devName := range lvmVolumeGroup.Spec.BlockDeviceNames { - dev, err := getBlockDevice(ctx, cl, metrics, lvmVolumeGroup.Namespace, devName) - if err != nil { - return nil, nil, err - } - - if pv.PVUuid == dev.Status.PVUuid { - flag = true - } - } - } - if !flag && pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { - shrinkPV = append(shrinkPV, pv.PVName) - } - } return extendPV, shrinkPV, nil } @@ -332,24 +305,25 @@ func DeleteVG(vgName string, log logger.Logger, metrics monitoring.Metrics) erro return nil } -func ExistVG(vgName string, log logger.Logger, metrics monitoring.Metrics) (bool, error) { +func GetVGFromNode(vgName string, log logger.Logger, metrics monitoring.Metrics) (bool, internal.VGData, error) { start := time.Now() - vg, command, _, err := utils.GetAllVGs() + var vg internal.VGData + vgs, command, _, err := utils.GetAllVGs() metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgs").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() log.Debug(command) if err != nil { metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() log.Error(err, " error CreateEventLVMVolumeGroup") - return false, err + return false, vg, err } - for _, v := range vg { - if v.VGName 
== vgName { - return true, nil + for _, vg := range vgs { + if vg.VGName == vgName { + return true, vg, nil } } - return false, nil + return false, vg, nil } func ValidateConsumableDevices(ctx context.Context, cl client.Client, metrics monitoring.Metrics, group *v1alpha1.LvmVolumeGroup) (bool, error) { @@ -422,7 +396,8 @@ func CreateVGComplex(ctx context.Context, cl client.Client, metrics monitoring.M return err } if !allDevicesConsumable { - l.Error(err, " error not all devices is consumable") + err = fmt.Errorf("not all devices is consumable") + l.Error(err, "error ValidateConsumableDevices") return err } paths, err := GetPathsConsumableDevicesFromLVMVG(ctx, cl, metrics, group) @@ -473,31 +448,13 @@ func CreateVGComplex(ctx context.Context, cl client.Client, metrics monitoring.M return nil } -func UpdateLVMVolumeGroupTagsName(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func UpdateLVMVolumeGroupTagsName(log logger.Logger, metrics monitoring.Metrics, vg internal.VGData, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { const tag = "storage.deckhouse.io/lvmVolumeGroupName" - start := time.Now() - vgs, cmd, _, err := utils.GetAllVGs() - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgs").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() - log.Debug(fmt.Sprintf("[ReconcileLVMVG] exec cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to get VG by resource, name: %s", lvg.Name)) - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() - return false, err - } - - var vg internal.VGData - for _, v := range vgs { - if v.VGName == lvg.Spec.ActualVGNameOnTheNode { - vg = v - } - } - found, tagName := CheckTag(vg.VGTags) if found && lvg.Name != tagName { - start = time.Now() - cmd, err = utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", tag, tagName)) + start := time.Now() 
+ cmd, err := utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", tag, tagName)) metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() log.Debug(fmt.Sprintf("[UpdateLVMVolumeGroupTagsName] exec cmd: %s", cmd)) diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/pkg/controller/lvm_volume_group_watcher_test.go index 661d86a4..fce7c7fe 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher_test.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher_test.go @@ -2,12 +2,13 @@ package controller import ( "context" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sds-node-configurator/api/v1alpha1" "sds-node-configurator/pkg/monitoring" "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { @@ -318,7 +319,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("ValidateLVMGroup_lvg_is_nil_returns_error", func(t *testing.T) { - valid, obj, err := ValidateLVMGroup(ctx, cl, metrics, nil, "test_ns", "test_node") + valid, obj, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, nil, "test_ns", "test_node") assert.False(t, valid) assert.Nil(t, obj) assert.EqualError(t, err, "lvmVolumeGroup is nil") @@ -350,7 +351,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testObj, namespace, "test_node") + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testObj, namespace, "test_node") assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -431,7 +432,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, 
cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -513,7 +514,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, "another-node") + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, "another-node") assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -595,7 +596,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.True(t, valid) if assert.NotNil(t, status) { assert.Equal(t, "", status.Health) @@ -677,7 +678,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -749,7 +750,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -821,7 +822,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.True(t, valid) if assert.NotNil(t, status) { 
assert.Equal(t, "", status.Health) diff --git a/images/agent/pkg/utils/commands.go b/images/agent/pkg/utils/commands.go index 6dd6eb0e..91e15a05 100644 --- a/images/agent/pkg/utils/commands.go +++ b/images/agent/pkg/utils/commands.go @@ -23,18 +23,13 @@ import ( "os/exec" "sds-node-configurator/api/v1alpha1" "sds-node-configurator/internal" - "strings" -) - -const ( - nsenter = "/usr/local/bin/flant/nsenter.static" ) func GetBlockDevices() ([]internal.Device, string, error) { var outs bytes.Buffer - args := []string{"/opt/deckhouse/bin/sds/lsblk.static", "-J", "-lpfb", "-no", "name,MOUNTPOINT,PARTUUID,HOTPLUG,MODEL,SERIAL,SIZE,FSTYPE,TYPE,WWN,KNAME,PKNAME,ROTA"} + args := []string{internal.LSBLKCmd, "-J", "-lpfb", "-no", "name,MOUNTPOINT,PARTUUID,HOTPLUG,MODEL,SERIAL,SIZE,FSTYPE,TYPE,WWN,KNAME,PKNAME,ROTA"} extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs var stderr bytes.Buffer @@ -57,7 +52,7 @@ func GetAllVGs() (data []internal.VGData, command string, stdErr bytes.Buffer, e var outs bytes.Buffer args := []string{"vgs", "-o", "+uuid,tags,shared", "--units", "B", "--nosuffix", "--reportformat", "json"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -77,7 +72,7 @@ func GetAllLVs() (data []internal.LVData, command string, stdErr bytes.Buffer, e var outs bytes.Buffer args := []string{"lvs", "-o", "+vg_uuid,tags", "--units", "B", "--nosuffix", "--reportformat", "json"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -99,7 +94,7 @@ func GetLV(vgName, lvName string) (lvData internal.LVData, command string, stdEr lvPath := fmt.Sprintf("/dev/%s/%s", vgName, lvName) args := []string{"lvs", "-o", "+vg_uuid,tags", "--units", "B", "--nosuffix", "--reportformat", "json", lvPath} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -120,7 +115,7 @@ func GetAllPVs() (data []internal.PVData, command string, stdErr bytes.Buffer, e var outs bytes.Buffer args := []string{"pvs", "-o", "+pv_used,pv_uuid,vg_tags,vg_uuid", "--units", "B", "--nosuffix", "--reportformat", "json"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -139,7 +134,7 @@ func GetAllPVs() (data []internal.PVData, command string, stdErr bytes.Buffer, e func CreatePV(path string) (string, error) { args := []string{"pvcreate", path} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -151,12 +146,14 @@ func CreatePV(path string) (string, error) { return cmd.String(), nil } -func CreateVGLocal(vgName, lvmName string, pvNames []string) (string, error) { - tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmName) - args := []string{"vgcreate", vgName, strings.Join(pvNames, " "), "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr} +func CreateVGLocal(vgName, lvmVolumeGroupName string, pvNames []string) (string, error) { + tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmVolumeGroupName) + args := []string{"vgcreate", vgName} + args = append(args, pvNames...) 
+ args = append(args, "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr) extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -168,10 +165,14 @@ func CreateVGLocal(vgName, lvmName string, pvNames []string) (string, error) { return cmd.String(), nil } -func CreateVGShared(vgName, lvmName string, pvNames []string) (string, error) { - args := []string{"vgcreate", "--shared", vgName, strings.Join(pvNames, " "), "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmName)} +func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string, error) { + tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmVolumeGroupName) + args := []string{"vgcreate", "--shared", vgName} + args = append(args, pvNames...) + args = append(args, "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr) + extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -186,7 +187,7 @@ func CreateVGShared(vgName, lvmName string, pvNames []string) (string, error) { func CreateThinPool(thinPool v1alpha1.SpecThinPool, VGName string) (string, error) { args := []string{"lvcreate", "-L", thinPool.Size.String(), "-T", fmt.Sprintf("%s/%s", VGName, thinPool.Name)} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
var stderr bytes.Buffer cmd.Stderr = &stderr @@ -200,7 +201,7 @@ func CreateThinPool(thinPool v1alpha1.SpecThinPool, VGName string) (string, erro func CreateThinLogicalVolume(vgName, tpName, lvName string, size int64) (string, error) { args := []string{"lvcreate", "-T", fmt.Sprintf("%s/%s", vgName, tpName), "-n", lvName, "-V", fmt.Sprintf("%dk", size/1024), "-W", "y", "-y"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -218,7 +219,7 @@ func CreateThinLogicalVolume(vgName, tpName, lvName string, size int64) (string, func CreateThickLogicalVolume(vgName, lvName string, size int64) (string, error) { args := []string{"lvcreate", "-n", fmt.Sprintf("%s/%s", vgName, lvName), "-L", fmt.Sprintf("%dk", size/1024), "-W", "y", "-y"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -231,9 +232,10 @@ func CreateThickLogicalVolume(vgName, lvName string, size int64) (string, error) } func ExtendVG(vgName string, paths []string) (string, error) { - args := []string{"vgextend", vgName, strings.Join(paths, " ")} + args := []string{"vgextend", vgName} + args = append(args, paths...) extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -248,7 +250,7 @@ func ExtendVG(vgName string, paths []string) (string, error) { func ExtendLV(size int64, vgName, lvName string) (string, error) { args := []string{"lvextend", "-L", fmt.Sprintf("%dk", size/1024), fmt.Sprintf("/dev/%s/%s", vgName, lvName)} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
var stderr bytes.Buffer cmd.Stderr = &stderr @@ -263,7 +265,7 @@ func ExtendLV(size int64, vgName, lvName string) (string, error) { func ResizePV(pvName string) (string, error) { args := []string{"pvresize", pvName} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -278,7 +280,7 @@ func ResizePV(pvName string) (string, error) { func RemoveVG(vgName string) (string, error) { args := []string{"vgremove", vgName} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -291,9 +293,10 @@ func RemoveVG(vgName string) (string, error) { } func RemovePV(pvNames []string) (string, error) { - args := []string{"pvremove", strings.Join(pvNames, " ")} + args := []string{"pvremove"} + args = append(args, pvNames...) extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -307,7 +310,7 @@ func RemovePV(pvNames []string) (string, error) { func RemoveLV(vgName, lvName string) (string, error) { args := []string{"lvremove", fmt.Sprintf("/dev/%s/%s", vgName, lvName), "-y"} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -322,7 +325,7 @@ func VGChangeAddTag(vGName, tag string) (string, error) { var outs, stdErr bytes.Buffer args := []string{"vgchange", vGName, "--addtag", tag} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -336,7 +339,7 @@ func VGChangeDelTag(vGName, tag string) (string, error) { var outs, stdErr bytes.Buffer args := []string{"vgchange", vGName, "--deltag", tag} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -351,7 +354,7 @@ func LVChangeDelTag(lv internal.LVData, tag string) (string, error) { var outs, stdErr bytes.Buffer args := []string{"lvchange", tmpStr, "--deltag", tag} extendedArgs := lvmStaticExtendedArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -431,7 +434,7 @@ func extendArgs(args []string) []string { func lvmStaticExtendedArgs(args []string) []string { nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"} - lvmStaticBin := []string{"--", "/opt/deckhouse/bin/sds/lvm.static"} + lvmStaticBin := []string{"--", internal.LVMCmd} result := append(nsenterArgs, lvmStaticBin...) return append(result, args...) } diff --git a/images/static-utils-copier/Dockerfile b/images/static-utils-copier/Dockerfile index 3cf46fb4..f7a39c11 100644 --- a/images/static-utils-copier/Dockerfile +++ b/images/static-utils-copier/Dockerfile @@ -1,8 +1,13 @@ ARG UBUNTU_UTILS_BUILDER=registry.deckhouse.io/base_images/ubuntu:jammy-20221130@sha256:c14c3b1242536729ce5227ff833144977b4e378723858fb73a4cf40ea6daaf6a ARG BASE_IMAGE=registry.deckhouse.io/base_images/alpine:3.16.3@sha256:5548e9172c24a1b0ca9afdd2bf534e265c94b12b36b3e0c0302f5853eaf00abb +ARG SOURCE_REPO ################################# FROM $UBUNTU_UTILS_BUILDER as lvm-builder +ARG SOURCE_REPO +# 2.03.23(2). 
Include fix for this issue: https://github.com/lvmteam/lvm2/issues/128 +ARG LVM_VERSION=d786a8f820d54ce87a919e6af5426c333c173b11 + RUN apt-get update && apt-get install -y \ build-essential \ autoconf \ @@ -11,19 +16,24 @@ RUN apt-get update && apt-get install -y \ pkg-config \ libdevmapper-dev \ libaio-dev \ + libblkid-dev \ thin-provisioning-tools \ git \ && rm -rf /var/lib/apt/lists/* -RUN git clone https://sourceware.org/git/lvm2.git /lvm2 - WORKDIR /lvm2 -RUN git checkout v2_03_22 -RUN ./configure --enable-static_link -RUN make + +RUN git clone ${SOURCE_REPO}/lvmteam/lvm2.git . && \ + git checkout ${LVM_VERSION} && \ + ./configure --enable-static_link --disable-silent-rules \ + --disable-readline --enable-blkid_wiping --build=x86_64-linux-gnu && \ + make ################################# FROM $UBUNTU_UTILS_BUILDER as util-linux-builder +ARG SOURCE_REPO +ARG UTIL_LINUX_VERSION=2.39.3 + RUN apt-get update && apt-get install -y \ build-essential \ git \ @@ -37,14 +47,14 @@ RUN apt-get update && apt-get install -y \ flex \ && rm -rf /var/lib/apt/lists/* -RUN git clone https://github.com/util-linux/util-linux.git /util-linux - WORKDIR /util-linux -RUN git checkout v2.39.3 -RUN ./autogen.sh -RUN ./configure LDFLAGS="-static" --enable-static-programs -disable-shared -RUN make LDFLAGS="--static" lsblk +RUN git clone ${SOURCE_REPO}/util-linux/util-linux.git . && \ + git checkout v${UTIL_LINUX_VERSION} && \ + ./autogen.sh && \ + ./configure LDFLAGS="-static" --enable-static-programs -disable-shared + +RUN make LDFLAGS="--static" lsblk ################################# FROM --platform=linux/amd64 $BASE_IMAGE @@ -53,10 +63,10 @@ RUN apk add --no-cache rsync WORKDIR / -ADD bin-copier.sh . 
+COPY --from=lvm-builder /lvm2/tools/lvm.static /static-bin/lvm.static +COPY --from=util-linux-builder /util-linux/lsblk /static-bin/lsblk.static -COPY --from=lvm-builder /lvm2/tools/lvm.static /usr/local/bin/flant/lvm.static -COPY --from=util-linux-builder /util-linux/lsblk /usr/local/bin/flant/lsblk.static +ADD bin-copier.sh . ENTRYPOINT ["/bin-copier.sh"] -CMD ["/usr/local/bin/flant", "/opt/deckhouse/bin/sds"] +CMD ["/static-bin", "/opt/deckhouse/sds"] diff --git a/templates/agent/daemonset.yaml b/templates/agent/daemonset.yaml index c0423cf9..e9c23970 100644 --- a/templates/agent/daemonset.yaml +++ b/templates/agent/daemonset.yaml @@ -56,8 +56,8 @@ spec: image: {{ include "helm_lib_module_image" (list . "staticUtilsCopier") }} imagePullPolicy: IfNotPresent volumeMounts: - - mountPath: /opt/deckhouse/bin/sds - name: opt-deckhouse-bin + - mountPath: /opt/deckhouse/sds + name: opt-deckhouse-sds resources: requests: {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} @@ -90,9 +90,6 @@ spec: {{- else if eq .Values.sdsNodeConfigurator.logLevel "TRACE" }} value: "4" {{- end }} - volumeMounts: - - mountPath: /opt/deckhouse/bin/sds - name: opt-deckhouse-bin resources: requests: {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} @@ -101,7 +98,7 @@ spec: {{- end }} volumes: - hostPath: - path: /opt/deckhouse/bin/sds + path: /opt/deckhouse/sds type: DirectoryOrCreate - name: opt-deckhouse-bin + name: opt-deckhouse-sds {{- end }} diff --git a/werf-giterminism.yaml b/werf-giterminism.yaml index 65c2a6da..1fc78d98 100644 --- a/werf-giterminism.yaml +++ b/werf-giterminism.yaml @@ -1,7 +1,7 @@ giterminismConfigVersion: 1 config: goTemplateRendering: # The rules for the Go-template functions - allowEnvVariables: [ /CI_.+/ ] + allowEnvVariables: [ /CI_.+/, SOURCE_REPO ] stapel: mount: allowFromPaths: