diff --git a/.github/workflows/build_dev.yml b/.github/workflows/build_dev.yml index 5a690d97..877bb804 100644 --- a/.github/workflows/build_dev.yml +++ b/.github/workflows/build_dev.yml @@ -7,6 +7,8 @@ env: MODULES_MODULE_SOURCE: ${{ vars.DEV_MODULE_SOURCE }} MODULES_REGISTRY_LOGIN: ${{ vars.DEV_MODULES_REGISTRY_LOGIN }} MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} + # for ex https://user:password@my-repo.com/group + SOURCE_REPO: "${{ secrets.SOURCE_REPO }}" on: pull_request: diff --git a/.github/workflows/build_prod.yml b/.github/workflows/build_prod.yml index 827c38a1..f159845c 100644 --- a/.github/workflows/build_prod.yml +++ b/.github/workflows/build_prod.yml @@ -8,6 +8,8 @@ env: MODULES_REGISTRY_LOGIN: ${{ vars.PROD_MODULES_REGISTRY_LOGIN }} MODULES_REGISTRY_PASSWORD: ${{ secrets.PROD_MODULES_REGISTRY_PASSWORD }} MODULES_MODULE_TAG: ${{ github.ref_name }} + # for ex https://user:password@my-repo.com/group + SOURCE_REPO: "${{ secrets.SOURCE_REPO }}" on: push: diff --git a/.werf/images.yaml b/.werf/images.yaml index 47b93856..0908d90a 100644 --- a/.werf/images.yaml +++ b/.werf/images.yaml @@ -8,6 +8,8 @@ image: images/{{ $ctx.ImageName }} context: images/{{ $ctx.ImageName }} dockerfile: Dockerfile +args: + SOURCE_REPO: {{ env "SOURCE_REPO" | default "https://github.com" }} {{- /* For werf.inc.yaml render content by providing the ImageName param. 
*/ -}} {{- else }} diff --git a/crds/lvmvolumegroup.yaml b/crds/lvmvolumegroup.yaml index 7f19a599..00e5d21e 100644 --- a/crds/lvmvolumegroup.yaml +++ b/crds/lvmvolumegroup.yaml @@ -32,6 +32,10 @@ spec: properties: spec: type: object + required: + - type + - blockDeviceNames + - actualVGNameOnTheNode properties: type: type: string diff --git a/images/agent/Dockerfile b/images/agent/Dockerfile new file mode 100644 index 00000000..15e696e8 --- /dev/null +++ b/images/agent/Dockerfile @@ -0,0 +1,52 @@ +ARG GOLANG_20_ALPINE_BUILDER=registry.deckhouse.io/base_images/golang:1.20.5-alpine3.18@sha256:51a47fb0851397db2f506c15c426735bc23de31177cbdd962880c0879d1906a4 +ARG UBUNTU_UTILS_BUILDER=registry.deckhouse.io/base_images/ubuntu:jammy-20221130@sha256:c14c3b1242536729ce5227ff833144977b4e378723858fb73a4cf40ea6daaf6a +ARG BASE_IMAGE=registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc + +################################# +FROM $UBUNTU_UTILS_BUILDER as util-linux-builder +ARG SOURCE_REPO +ARG UTIL_LINUX_VERSION=2.39.3 + +RUN apt-get update && apt-get install -y \ + build-essential \ + git \ + pkg-config \ + autopoint \ + autoconf \ + bison \ + libtool \ + automake \ + gettext \ + flex \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /util-linux + +RUN git clone ${SOURCE_REPO}/util-linux/util-linux.git . && \ + git checkout v${UTIL_LINUX_VERSION} && \ + ./autogen.sh && \ + ./configure LDFLAGS="-static" --enable-static-programs -disable-shared + +RUN make LDFLAGS="--static" nsenter + +################################# +FROM $GOLANG_20_ALPINE_BUILDER as agent-builder +WORKDIR /go/src + +ADD go.mod . +ADD go.sum . + +RUN go mod download + +COPY . . 
+ +WORKDIR /go/src/cmd +RUN GOOS=linux GOARCH=amd64 go build -o sds-node-configurator-agent + +################################# +FROM --platform=linux/amd64 $BASE_IMAGE + +COPY --from=util-linux-builder /util-linux/nsenter /opt/deckhouse/sds/nsenter.static +COPY --from=agent-builder /go/src/cmd/sds-node-configurator-agent /go/src/cmd/sds-node-configurator-agent + +CMD ["/go/src/cmd/sds-node-configurator-agent"] diff --git a/images/agent/cmd/bc/main.go b/images/agent/cmd/main.go similarity index 100% rename from images/agent/cmd/bc/main.go rename to images/agent/cmd/main.go diff --git a/images/agent/config/config.go b/images/agent/config/config.go index 06c61d3e..ccf4d7ff 100644 --- a/images/agent/config/config.go +++ b/images/agent/config/config.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "os/exec" + "sds-node-configurator/internal" "sds-node-configurator/pkg/logger" "time" ) @@ -80,7 +81,7 @@ func getMachineId() (string, error) { args := []string{"-m", "-u", "-i", "-n", "-p", "-t", "1", "cat", "/etc/machine-id"} var stdout bytes.Buffer - cmd := exec.Command("/usr/bin/nsenter", args...) + cmd := exec.Command(internal.NSENTERCmd, args...) 
cmd.Stdout = &stdout err := cmd.Run() if err != nil { diff --git a/images/agent/internal/const.go b/images/agent/internal/const.go index 094673c4..ddba703f 100644 --- a/images/agent/internal/const.go +++ b/images/agent/internal/const.go @@ -29,6 +29,9 @@ const ( ResizeDelta = "32Mi" KubernetesApiRequestLimit = 5 KubernetesApiRequestTimeout = 1 + NSENTERCmd = "/opt/deckhouse/sds/nsenter.static" + LSBLKCmd = "/opt/deckhouse/sds/lsblk.static" + LVMCmd = "/opt/deckhouse/sds/lvm.static" ) var ( diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher.go b/images/agent/pkg/controller/lvm_volume_group_watcher.go index 1b991f1f..c50c2305 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher.go @@ -80,7 +80,7 @@ func RunLVMVolumeGroupWatcherController( } createFunc := func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupController] event create LVMVolumeGroup, name: %s", e.Object.GetName())) + log.Info(fmt.Sprintf("[RunLVMVolumeGroupController] Get event CREATE for resource LVMVolumeGroup, name: %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} shouldRequeue, err := ReconcileLVMVG(ctx, metrics, e.Object.GetName(), e.Object.GetNamespace(), cfg.NodeName, log, cl) @@ -92,7 +92,7 @@ func RunLVMVolumeGroupWatcherController( } updateFunc := func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update LVMVolumeGroupn, name: %s", e.ObjectNew.GetName())) + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Get event UPDATE for resource LVMVolumeGroup, name: %s", e.ObjectNew.GetName())) newLVG, ok := e.ObjectNew.(*v1alpha1.LvmVolumeGroup) if !ok { @@ -198,41 +198,34 @@ func ReconcileLVMVG( lvg, err := getLVMVolumeGroup(ctx, cl, 
metrics, objectNameSpace, objectName) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error getLVMVolumeGroup, objectname: %s", objectName)) - return true, err + return false, err } - validation, status, err := ValidateLVMGroup(ctx, cl, metrics, lvg, objectNameSpace, nodeName) - if lvg == nil { err = errors.New("nil pointer detected") log.Error(err, "[ReconcileLVMVG] requested LVMVG group in nil") return true, err } - if status.Health == NonOperational { - health := status.Health - var message string - if err != nil { - message = err.Error() - } - - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] ValidateLVMGroup, resource name: %s, message: %s", lvg.Name, message)) - err = updateLVMVolumeGroupHealthStatus(ctx, cl, metrics, lvg.Name, lvg.Namespace, message, health) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error update LVMVolumeGroup %s", lvg.Name)) - return true, err - } - } + isOwnedByNode, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, lvg, objectNameSpace, nodeName) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] validationLVMGroup failed, resource name: %s", lvg.Name)) - return false, err + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error CheckLVMVGNodeOwnership, resource name: %s", lvg.Name)) + if status.Health == NonOperational { + health := status.Health + message := status.Message + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] ValidateLVMGroup, resource name: %s, health: %s, phase: %s, message: %s", lvg.Name, health, status.Phase, message)) + err = updateLVMVolumeGroupHealthStatus(ctx, cl, metrics, lvg.Name, lvg.Namespace, message, health) + if err != nil { + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error update LVMVolumeGroup %s", lvg.Name)) + return true, err + } + } + return true, err } - if validation == false { - err = errors.New("resource validation failed") - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] validation failed for resource, name: %s", lvg.Name)) - log.Error(err, 
fmt.Sprintf("[ReconcileLVMVG] status.Message = %s", status.Message)) - return false, err + if !isOwnedByNode { + log.Debug(fmt.Sprintf("[ReconcileLVMVG] resource is not owned by node, name: %s, skip it", lvg.Name)) + return false, nil } log.Info("[ReconcileLVMVG] validation passed") @@ -272,14 +265,14 @@ func ReconcileLVMVG( } log.Info(fmt.Sprintf(`[ReconcileLVMVG] event was created for resource, name: %s`, lvg.Name)) - existVG, err := ExistVG(lvg.Spec.ActualVGNameOnTheNode, log, metrics) + isVgExist, vg, err := GetVGFromNode(lvg.Spec.ActualVGNameOnTheNode, log, metrics) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ExistVG, name: %s", lvg.Spec.ActualVGNameOnTheNode)) return true, err } - if existVG { - log.Debug("[ReconcileLVMVG] tries to update ") - updated, err := UpdateLVMVolumeGroupTagsName(log, metrics, lvg) + if isVgExist { + log.Debug("[ReconcileLVMVG] start UpdateLVMVolumeGroupTagsName for r " + lvg.Name) + updated, err := UpdateLVMVolumeGroupTagsName(log, metrics, vg, lvg) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to update VG tags on VG, name: %s", lvg.Spec.ActualVGNameOnTheNode)) return true, err @@ -292,16 +285,16 @@ func ReconcileLVMVG( } log.Info("[ReconcileLVMVG] validation and choosing the type of operation") - extendPVs, shrinkPVs, err := ValidateTypeLVMGroup(ctx, cl, metrics, lvg, log) + extendPVs, shrinkPVs, err := ValidateOperationTypeLVMGroup(ctx, cl, metrics, lvg, log) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ValidateTypeLVMGroup, name: %s", lvg.Name)) + log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error ValidateOperationTypeLVMGroup, name: %s", lvg.Name)) return true, err } - if err == nil && extendPVs == nil && shrinkPVs == nil { - log.Warning("[ReconcileLVMVG] ValidateTypeLVMGroup FAIL") - //todo retry and send message - } + // if err == nil && extendPVs == nil && shrinkPVs == nil { + // log.Warning(fmt.Sprintf("[ReconcileLVMVG] ValidateOperationTypeLVMGroup FAIL 
for resource %s", lvg.Name)) + // //todo retry and send message + // } log.Debug("----- extendPVs list -----") for _, pvExt := range extendPVs { @@ -449,6 +442,7 @@ func ReconcileLVMVG( log.Error(err, fmt.Sprintf("[ReconcileLVMVG] error CreateEventLVMVolumeGroup, resource name: %s", lvg.Name)) } + log.Debug("[ReconcileLVMVG] Start CreateVGComplex function for resource " + lvg.Name) err := CreateVGComplex(ctx, cl, metrics, lvg, log) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to CreateVGComplex for resource, name: %s", lvg.Name)) diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/pkg/controller/lvm_volume_group_watcher_func.go index 1b7ac562..1a306dba 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher_func.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher_func.go @@ -109,7 +109,7 @@ func getBlockDevice(ctx context.Context, cl client.Client, metrics monitoring.Me return obj, nil } -func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, namespace, nodeName string) (bool, *StatusLVMVolumeGroup, error) { +func CheckLVMVGNodeOwnership(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, namespace, nodeName string) (bool, *StatusLVMVolumeGroup, error) { status := StatusLVMVolumeGroup{} if lvmVolumeGroup == nil { return false, nil, errors.New("lvmVolumeGroup is nil") @@ -120,7 +120,10 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. 
for _, blockDev := range lvmVolumeGroup.Spec.BlockDeviceNames { device, err := getBlockDevice(ctx, cl, metrics, namespace, blockDev) if err != nil { + err = fmt.Errorf("error getBlockDevice: %s", err) status.Health = NonOperational + status.Phase = Failed + status.Message = err.Error() return false, &status, err } if device.Status.NodeName == nodeName { @@ -129,6 +132,13 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. } if membership == len(lvmVolumeGroup.Spec.BlockDeviceNames) { + if lvmVolumeGroup.Spec.ActualVGNameOnTheNode == "" { + err := fmt.Errorf("actualVGNameOnTheNode is empty") + status.Health = NonOperational + status.Phase = Failed + status.Message = "actualVGNameOnTheNode is empty" + return false, &status, err + } return true, &status, nil } @@ -136,7 +146,7 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. status.Health = NonOperational status.Phase = Failed status.Message = "selected block devices are from different nodes for local LVMVolumeGroup" - return false, &status, errors.New("wrong block devices selected") + return false, &status, nil } if membership == 0 { @@ -144,31 +154,10 @@ func ValidateLVMGroup(ctx context.Context, cl client.Client, metrics monitoring. 
} } - if lvmVolumeGroup.Spec.Type == Shared { - if len(lvmVolumeGroup.Spec.BlockDeviceNames) != 1 { - status.Health = NonOperational - status.Phase = Failed - status.Message = "several block devices are selected for the shared LVMVolumeGroup" - return false, &status, errors.New(status.Message) - } - - singleBD := lvmVolumeGroup.Spec.BlockDeviceNames[0] - bd, err := getBlockDevice(ctx, cl, metrics, namespace, singleBD) - if err != nil { - status.Health = NonOperational - status.Phase = Failed - status.Message = "selected unknown block device for the shared LVMVolumeGroup" - return false, &status, err - } - - if bd.Status.NodeName == nodeName { - return true, &status, nil - } - } return false, &status, nil } -func ValidateTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, l logger.Logger) (extendPV, shrinkPV []string, err error) { +func ValidateOperationTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, lvmVolumeGroup *v1alpha1.LvmVolumeGroup, l logger.Logger) (extendPV, shrinkPV []string, err error) { pvs, cmdStr, _, err := utils.GetAllPVs() l.Debug(fmt.Sprintf("GetAllPVs exec cmd: %s", cmdStr)) if err != nil { @@ -182,36 +171,27 @@ func ValidateTypeLVMGroup(ctx context.Context, cl client.Client, metrics monitor } if dev.Status.Consumable == true { - extendPV = append(extendPV, dev.Status.Path) + isReallyConsumable := true + for _, pv := range pvs { + if pv.PVName == dev.Status.Path && pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { + isReallyConsumable = false + break + } + } + if isReallyConsumable { + extendPV = append(extendPV, dev.Status.Path) + } + continue } if dev.Status.ActualVGNameOnTheNode != lvmVolumeGroup.Spec.ActualVGNameOnTheNode && (len(dev.Status.VGUuid) != 0) { - return nil, nil, nil - // validation fail, send message => LVG ? + err = fmt.Errorf("block device %s is already in use by another VG: %s with uuid %s. 
Our VG: %s", devName, dev.Status.ActualVGNameOnTheNode, dev.Status.VGUuid, lvmVolumeGroup.Spec.ActualVGNameOnTheNode) + return nil, nil, err } + // TODO: realisation of shrinkPV } - var flag bool - - for _, pv := range pvs { - if pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { - flag = false - for _, devName := range lvmVolumeGroup.Spec.BlockDeviceNames { - dev, err := getBlockDevice(ctx, cl, metrics, lvmVolumeGroup.Namespace, devName) - if err != nil { - return nil, nil, err - } - - if pv.PVUuid == dev.Status.PVUuid { - flag = true - } - } - } - if !flag && pv.VGName == lvmVolumeGroup.Spec.ActualVGNameOnTheNode { - shrinkPV = append(shrinkPV, pv.PVName) - } - } return extendPV, shrinkPV, nil } @@ -332,24 +312,25 @@ func DeleteVG(vgName string, log logger.Logger, metrics monitoring.Metrics) erro return nil } -func ExistVG(vgName string, log logger.Logger, metrics monitoring.Metrics) (bool, error) { +func GetVGFromNode(vgName string, log logger.Logger, metrics monitoring.Metrics) (bool, internal.VGData, error) { start := time.Now() - vg, command, _, err := utils.GetAllVGs() + var vg internal.VGData + vgs, command, _, err := utils.GetAllVGs() metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgs").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() log.Debug(command) if err != nil { metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() log.Error(err, " error CreateEventLVMVolumeGroup") - return false, err + return false, vg, err } - for _, v := range vg { - if v.VGName == vgName { - return true, nil + for _, vg := range vgs { + if vg.VGName == vgName { + return true, vg, nil } } - return false, nil + return false, vg, nil } func ValidateConsumableDevices(ctx context.Context, cl client.Client, metrics monitoring.Metrics, group *v1alpha1.LvmVolumeGroup) (bool, error) { @@ -422,7 +403,8 @@ func CreateVGComplex(ctx
context.Context, cl client.Client, metrics monitoring.M return err } if !allDevicesConsumable { - l.Error(err, " error not all devices is consumable") + err = fmt.Errorf("not all devices is consumable") + l.Error(err, "error ValidateConsumableDevices") return err } paths, err := GetPathsConsumableDevicesFromLVMVG(ctx, cl, metrics, group) @@ -473,31 +455,13 @@ func CreateVGComplex(ctx context.Context, cl client.Client, metrics monitoring.M return nil } -func UpdateLVMVolumeGroupTagsName(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func UpdateLVMVolumeGroupTagsName(log logger.Logger, metrics monitoring.Metrics, vg internal.VGData, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { const tag = "storage.deckhouse.io/lvmVolumeGroupName" - start := time.Now() - vgs, cmd, _, err := utils.GetAllVGs() - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgs").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() - log.Debug(fmt.Sprintf("[ReconcileLVMVG] exec cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMVG] unable to get VG by resource, name: %s", lvg.Name)) - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgs").Inc() - return false, err - } - - var vg internal.VGData - for _, v := range vgs { - if v.VGName == lvg.Spec.ActualVGNameOnTheNode { - vg = v - } - } - found, tagName := CheckTag(vg.VGTags) if found && lvg.Name != tagName { - start = time.Now() - cmd, err = utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", tag, tagName)) + start := time.Now() + cmd, err := utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", tag, tagName)) metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() log.Debug(fmt.Sprintf("[UpdateLVMVolumeGroupTagsName] exec 
cmd: %s", cmd)) diff --git a/images/agent/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/pkg/controller/lvm_volume_group_watcher_test.go index 661d86a4..fce7c7fe 100644 --- a/images/agent/pkg/controller/lvm_volume_group_watcher_test.go +++ b/images/agent/pkg/controller/lvm_volume_group_watcher_test.go @@ -2,12 +2,13 @@ package controller import ( "context" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sds-node-configurator/api/v1alpha1" "sds-node-configurator/pkg/monitoring" "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { @@ -318,7 +319,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("ValidateLVMGroup_lvg_is_nil_returns_error", func(t *testing.T) { - valid, obj, err := ValidateLVMGroup(ctx, cl, metrics, nil, "test_ns", "test_node") + valid, obj, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, nil, "test_ns", "test_node") assert.False(t, valid) assert.Nil(t, obj) assert.EqualError(t, err, "lvmVolumeGroup is nil") @@ -350,7 +351,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testObj, namespace, "test_node") + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testObj, namespace, "test_node") assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -431,7 +432,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -513,7 +514,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := 
ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, "another-node") + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, "another-node") assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -595,7 +596,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.True(t, valid) if assert.NotNil(t, status) { assert.Equal(t, "", status.Health) @@ -677,7 +678,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -749,7 +750,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.False(t, valid) if assert.NotNil(t, status) { assert.Equal(t, NonOperational, status.Health) @@ -821,7 +822,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - valid, status, err := ValidateLVMGroup(ctx, cl, metrics, testLvg, namespace, testNode) + valid, status, err := CheckLVMVGNodeOwnership(ctx, cl, metrics, testLvg, namespace, testNode) assert.True(t, valid) if assert.NotNil(t, status) { assert.Equal(t, "", status.Health) diff --git a/images/agent/pkg/utils/commands.go b/images/agent/pkg/utils/commands.go index 0c553f28..91e15a05 100644 --- a/images/agent/pkg/utils/commands.go +++ b/images/agent/pkg/utils/commands.go @@ -23,18 +23,13 @@ import ( "os/exec" "sds-node-configurator/api/v1alpha1" 
"sds-node-configurator/internal" - "strings" -) - -const ( - nsenter = "/usr/bin/nsenter" ) func GetBlockDevices() ([]internal.Device, string, error) { var outs bytes.Buffer - args := []string{"lsblk", "-J", "-lpfb", "-no", "name,MOUNTPOINT,PARTUUID,HOTPLUG,MODEL,SERIAL,SIZE,FSTYPE,TYPE,WWN,KNAME,PKNAME,ROTA"} + args := []string{internal.LSBLKCmd, "-J", "-lpfb", "-no", "name,MOUNTPOINT,PARTUUID,HOTPLUG,MODEL,SERIAL,SIZE,FSTYPE,TYPE,WWN,KNAME,PKNAME,ROTA"} extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs var stderr bytes.Buffer @@ -56,8 +51,8 @@ func GetBlockDevices() ([]internal.Device, string, error) { func GetAllVGs() (data []internal.VGData, command string, stdErr bytes.Buffer, err error) { var outs bytes.Buffer args := []string{"vgs", "-o", "+uuid,tags,shared", "--units", "B", "--nosuffix", "--reportformat", "json"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -76,8 +71,8 @@ func GetAllVGs() (data []internal.VGData, command string, stdErr bytes.Buffer, e func GetAllLVs() (data []internal.LVData, command string, stdErr bytes.Buffer, err error) { var outs bytes.Buffer args := []string{"lvs", "-o", "+vg_uuid,tags", "--units", "B", "--nosuffix", "--reportformat", "json"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -98,8 +93,8 @@ func GetLV(vgName, lvName string) (lvData internal.LVData, command string, stdEr lvData = internal.LVData{} lvPath := fmt.Sprintf("/dev/%s/%s", vgName, lvName) args := []string{"lvs", "-o", "+vg_uuid,tags", "--units", "B", "--nosuffix", "--reportformat", "json", lvPath} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -119,8 +114,8 @@ func GetLV(vgName, lvName string) (lvData internal.LVData, command string, stdEr func GetAllPVs() (data []internal.PVData, command string, stdErr bytes.Buffer, err error) { var outs bytes.Buffer args := []string{"pvs", "-o", "+pv_used,pv_uuid,vg_tags,vg_uuid", "--units", "B", "--nosuffix", "--reportformat", "json"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -138,8 +133,8 @@ func GetAllPVs() (data []internal.PVData, command string, stdErr bytes.Buffer, e func CreatePV(path string) (string, error) { args := []string{"pvcreate", path} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
var stderr bytes.Buffer cmd.Stderr = &stderr @@ -151,12 +146,14 @@ func CreatePV(path string) (string, error) { return cmd.String(), nil } -func CreateVGLocal(vgName, lvmName string, pvNames []string) (string, error) { - tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmName) - args := []string{"vgcreate", vgName, strings.Join(pvNames, " "), "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr} +func CreateVGLocal(vgName, lvmVolumeGroupName string, pvNames []string) (string, error) { + tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmVolumeGroupName) + args := []string{"vgcreate", vgName} + args = append(args, pvNames...) + args = append(args, "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr) - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -168,10 +165,14 @@ func CreateVGLocal(vgName, lvmName string, pvNames []string) (string, error) { return cmd.String(), nil } -func CreateVGShared(vgName, lvmName string, pvNames []string) (string, error) { - args := []string{"vgcreate", "--shared", vgName, strings.Join(pvNames, " "), "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmName)} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) +func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string, error) { + tmpStr := fmt.Sprintf("storage.deckhouse.io/lvmVolumeGroupName=%s", lvmVolumeGroupName) + args := []string{"vgcreate", "--shared", vgName} + args = append(args, pvNames...) + args = append(args, "--addtag", "storage.deckhouse.io/enabled=true", "--addtag", tmpStr) + + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
var stderr bytes.Buffer cmd.Stderr = &stderr @@ -185,8 +186,8 @@ func CreateVGShared(vgName, lvmName string, pvNames []string) (string, error) { func CreateThinPool(thinPool v1alpha1.SpecThinPool, VGName string) (string, error) { args := []string{"lvcreate", "-L", thinPool.Size.String(), "-T", fmt.Sprintf("%s/%s", VGName, thinPool.Name)} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -199,8 +200,8 @@ func CreateThinPool(thinPool v1alpha1.SpecThinPool, VGName string) (string, erro func CreateThinLogicalVolume(vgName, tpName, lvName string, size int64) (string, error) { args := []string{"lvcreate", "-T", fmt.Sprintf("%s/%s", vgName, tpName), "-n", lvName, "-V", fmt.Sprintf("%dk", size/1024), "-W", "y", "-y"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -217,8 +218,8 @@ func CreateThinLogicalVolume(vgName, tpName, lvName string, size int64) (string, func CreateThickLogicalVolume(vgName, lvName string, size int64) (string, error) { args := []string{"lvcreate", "-n", fmt.Sprintf("%s/%s", vgName, lvName), "-L", fmt.Sprintf("%dk", size/1024), "-W", "y", "-y"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -231,9 +232,10 @@ func CreateThickLogicalVolume(vgName, lvName string, size int64) (string, error) } func ExtendVG(vgName string, paths []string) (string, error) { - args := []string{"vgextend", vgName, strings.Join(paths, " ")} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) 
+ args := []string{"vgextend", vgName} + args = append(args, paths...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -247,8 +249,8 @@ func ExtendVG(vgName string, paths []string) (string, error) { func ExtendLV(size int64, vgName, lvName string) (string, error) { args := []string{"lvextend", "-L", fmt.Sprintf("%dk", size/1024), fmt.Sprintf("/dev/%s/%s", vgName, lvName)} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -262,8 +264,8 @@ func ExtendLV(size int64, vgName, lvName string) (string, error) { func ResizePV(pvName string) (string, error) { args := []string{"pvresize", pvName} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -277,8 +279,8 @@ func ResizePV(pvName string) (string, error) { func RemoveVG(vgName string) (string, error) { args := []string{"vgremove", vgName} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -291,9 +293,10 @@ func RemoveVG(vgName string) (string, error) { } func RemovePV(pvNames []string) (string, error) { - args := []string{"pvremove", strings.Join(pvNames, " ")} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + args := []string{"pvremove"} + args = append(args, pvNames...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
var stderr bytes.Buffer cmd.Stderr = &stderr @@ -306,8 +309,8 @@ func RemovePV(pvNames []string) (string, error) { func RemoveLV(vgName, lvName string) (string, error) { args := []string{"lvremove", fmt.Sprintf("/dev/%s/%s", vgName, lvName), "-y"} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) var stderr bytes.Buffer cmd.Stderr = &stderr @@ -321,8 +324,8 @@ func RemoveLV(vgName, lvName string) (string, error) { func VGChangeAddTag(vGName, tag string) (string, error) { var outs, stdErr bytes.Buffer args := []string{"vgchange", vGName, "--addtag", tag} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -335,8 +338,8 @@ func VGChangeAddTag(vGName, tag string) (string, error) { func VGChangeDelTag(vGName, tag string) (string, error) { var outs, stdErr bytes.Buffer args := []string{"vgchange", vGName, "--deltag", tag} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -350,8 +353,8 @@ func LVChangeDelTag(lv internal.LVData, tag string) (string, error) { tmpStr := fmt.Sprintf("/dev/%s/%s", lv.VGName, lv.LVName) var outs, stdErr bytes.Buffer args := []string{"lvchange", tmpStr, "--deltag", tag} - extendedArgs := extendArgs(args) - cmd := exec.Command(nsenter, extendedArgs...) + extendedArgs := lvmStaticExtendedArgs(args) + cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) 
cmd.Stdout = &outs cmd.Stderr = &stdErr @@ -428,3 +431,10 @@ func extendArgs(args []string) []string { nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"} return append(nsenterArgs, args...) } + +func lvmStaticExtendedArgs(args []string) []string { + nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"} + lvmStaticBin := []string{"--", internal.LVMCmd} + result := append(nsenterArgs, lvmStaticBin...) + return append(result, args...) +} diff --git a/images/agent/werf.inc.yaml b/images/agent/werf.inc.yaml deleted file mode 100644 index ae0a153f..00000000 --- a/images/agent/werf.inc.yaml +++ /dev/null @@ -1,52 +0,0 @@ ---- -artifact: agent-build -from: registry.deckhouse.io/base_images/golang:1.20.5-alpine3.18@sha256:51a47fb0851397db2f506c15c426735bc23de31177cbdd962880c0879d1906a4 -git: -- add: /images/agent - to: /src - excludePaths: - - "**/*.md" - - "**/*.yaml" - stageDependencies: - install: - - go.mod - - go.sum - setup: - - "**/*.go" -mount: -- fromPath: ~/go-pkg-cache - to: /go/pkg -ansible: - install: - - shell: go mod download - args: - chdir: /src - setup: - - shell: go build -o sds-node-configurator-agent . 
- args: - chdir: /src/cmd/bc - environment: - GOOS: "linux" - GOARCH: "amd64" - CGO_ENABLED: "0" ---- -image: images/{{ .ImageName }} -from: registry.deckhouse.io/base_images/ubuntu:jammy-20221130@sha256:c14c3b1242536729ce5227ff833144977b4e378723858fb73a4cf40ea6daaf6a -docker: - ENTRYPOINT: ["/sds-node-configurator-agent"] -import: -- artifact: agent-build - add: /src/cmd/bc/sds-node-configurator-agent - to: /sds-node-configurator-agent - before: setup -ansible: - install: - - name: "Install packages" - apt: - name: - - lvm2 - - curl - - kmod - update_cache: yes - setup: - - shell: sed -i 's/udev_rules.*=.*/udev_rules=0/ ; s/udev_sync.*=.*/udev_sync=0/ ; s/obtain_device_list_from_udev.*=.*/obtain_device_list_from_udev=0/' /etc/lvm/lvm.conf diff --git a/images/static-utils-copier/Dockerfile b/images/static-utils-copier/Dockerfile new file mode 100644 index 00000000..f7a39c11 --- /dev/null +++ b/images/static-utils-copier/Dockerfile @@ -0,0 +1,72 @@ +ARG UBUNTU_UTILS_BUILDER=registry.deckhouse.io/base_images/ubuntu:jammy-20221130@sha256:c14c3b1242536729ce5227ff833144977b4e378723858fb73a4cf40ea6daaf6a +ARG BASE_IMAGE=registry.deckhouse.io/base_images/alpine:3.16.3@sha256:5548e9172c24a1b0ca9afdd2bf534e265c94b12b36b3e0c0302f5853eaf00abb +ARG SOURCE_REPO + +################################# +FROM $UBUNTU_UTILS_BUILDER as lvm-builder +ARG SOURCE_REPO +# 2.03.23(2). Include fix for this issue: https://github.com/lvmteam/lvm2/issues/128 +ARG LVM_VERSION=d786a8f820d54ce87a919e6af5426c333c173b11 + +RUN apt-get update && apt-get install -y \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + libdevmapper-dev \ + libaio-dev \ + libblkid-dev \ + thin-provisioning-tools \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /lvm2 + +RUN git clone ${SOURCE_REPO}/lvmteam/lvm2.git . 
&& \ + git checkout ${LVM_VERSION} && \ + ./configure --enable-static_link --disable-silent-rules \ + --disable-readline --enable-blkid_wiping --build=x86_64-linux-gnu && \ + make + +################################# +FROM $UBUNTU_UTILS_BUILDER as util-linux-builder +ARG SOURCE_REPO +ARG UTIL_LINUX_VERSION=2.39.3 + +RUN apt-get update && apt-get install -y \ + build-essential \ + git \ + pkg-config \ + autopoint \ + autoconf \ + bison \ + libtool \ + automake \ + gettext \ + flex \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /util-linux + +RUN git clone ${SOURCE_REPO}/util-linux/util-linux.git . && \ + git checkout v${UTIL_LINUX_VERSION} && \ + ./autogen.sh && \ + ./configure LDFLAGS="-static" --enable-static-programs -disable-shared + +RUN make LDFLAGS="--static" lsblk + +################################# +FROM --platform=linux/amd64 $BASE_IMAGE + +RUN apk add --no-cache rsync + +WORKDIR / + +COPY --from=lvm-builder /lvm2/tools/lvm.static /static-bin/lvm.static +COPY --from=util-linux-builder /util-linux/lsblk /static-bin/lsblk.static + +ADD bin-copier.sh . + +ENTRYPOINT ["/bin-copier.sh"] +CMD ["/static-bin", "/opt/deckhouse/sds"] diff --git a/images/static-utils-copier/bin-copier.sh b/images/static-utils-copier/bin-copier.sh new file mode 100755 index 00000000..372f3722 --- /dev/null +++ b/images/static-utils-copier/bin-copier.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +# Copyright 2024 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export SOURCE_DIR=$1 +export DEST_DIR=$2 + +if [ -z "$SOURCE_DIR" ]; then + echo "Source directory is not specified" + echo "Usage: $0 " + exit 1 +fi + +if [ ! -d "$SOURCE_DIR" ]; then + echo "Source directory $SOURCE_DIR does not exist" + exit 1 +fi + +if [ -z "$DEST_DIR" ]; then + echo "Destination directory is not specified" + echo "Usage: $0 " + exit 1 +fi + +if [ ! -d "$DEST_DIR" ]; then + echo "Destination directory $DEST_DIR does not exist" + exit 1 +fi + +rsync -av --checksum "$SOURCE_DIR/" "$DEST_DIR" +if [ $? -ne 0 ]; then + echo "Failed to copy files from $SOURCE_DIR to $DEST_DIR" + exit 1 +fi + +echo "Finished copying files from $SOURCE_DIR to $DEST_DIR" diff --git a/templates/agent/daemonset.yaml b/templates/agent/daemonset.yaml index eb762089..e9c23970 100644 --- a/templates/agent/daemonset.yaml +++ b/templates/agent/daemonset.yaml @@ -1,3 +1,13 @@ +{{- define "static_utils_copier_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "sds_node_configurator_agent_resources" }} +cpu: 50m +memory: 50Mi +{{- end }} + {{- if not .Values.sdsNodeConfigurator.disableDs }} --- apiVersion: apps/v1 @@ -41,6 +51,19 @@ spec: - name: {{ .Chart.Name }}-module-registry serviceAccountName: sds-node-configurator hostPID: true + initContainers: + - name: static-utils-copier + image: {{ include "helm_lib_module_image" (list . "staticUtilsCopier") }} + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /opt/deckhouse/sds + name: opt-deckhouse-sds + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "static_utils_copier_resources" . | nindent 14 }} +{{- end }} containers: - name: sds-node-configurator-agent image: {{ include "helm_lib_module_image" (list . 
"agent") }} @@ -67,4 +90,15 @@ spec: {{- else if eq .Values.sdsNodeConfigurator.logLevel "TRACE" }} value: "4" {{- end }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "sds_node_configurator_agent_resources" . | nindent 14 }} +{{- end }} + volumes: + - hostPath: + path: /opt/deckhouse/sds + type: DirectoryOrCreate + name: opt-deckhouse-sds {{- end }} diff --git a/werf-giterminism.yaml b/werf-giterminism.yaml index 65c2a6da..1fc78d98 100644 --- a/werf-giterminism.yaml +++ b/werf-giterminism.yaml @@ -1,7 +1,7 @@ giterminismConfigVersion: 1 config: goTemplateRendering: # The rules for the Go-template functions - allowEnvVariables: [ /CI_.+/ ] + allowEnvVariables: [ /CI_.+/, SOURCE_REPO ] stapel: mount: allowFromPaths: