Restore topolvm support #108

Open · wants to merge 5 commits into base: datadog-master-12.0

Changes from 4 commits

51 changes: 46 additions & 5 deletions cluster-autoscaler/processors/datadog/common/common.go
@@ -19,6 +19,7 @@ package common
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"

schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -31,20 +32,39 @@ const (
// nodes offering local storage, and currently injected as requests on
// Pending pods having a PVC for local-data volumes.
DatadogLocalDataResource apiv1.ResourceName = "storageclass/local-data"
// DatadogEphemeralLocalDataResource is a virtual resource placed on new or future
// nodes offering ephemeral local storage, and currently injected as requests on
// Pending pods having an ephemeral PVC for ephemeral-local-data volumes.
DatadogEphemeralLocalDataResource apiv1.ResourceName = "storageclass/ephemeral-local-data"

// DatadogLocalStorageProvisionerLabel indicates which technology will be used to provide local storage
DatadogLocalStorageProvisionerLabel = "nodegroups.datadoghq.com/local-storage-provisioner"
// DatadogInitialStorageCapacityLabel stores the amount of local storage a new node will have initially,
// e.g. nodegroups.datadoghq.com/initial-storage-capacity=100Gi
DatadogInitialStorageCapacityLabel = "nodegroups.datadoghq.com/initial-storage-capacity"

// DatadogStorageProvisionerTopoLVM is the storage provisioner label value to use for the topolvm implementation
DatadogStorageProvisionerTopoLVM = "topolvm"
)

var (
// DatadogLocalDataQuantity is the default amount of DatadogLocalDataResource
DatadogLocalDataQuantity = resource.NewQuantity(1, resource.DecimalSI)
)

// NodeHasLocalData returns true if the node holds a local-storage:true label
// NodeHasLocalData returns true if the node holds a local-storage:true or local-storage-provisioner:<any> label
func NodeHasLocalData(node *apiv1.Node) bool {
if node == nil {
return false
}
value, ok := node.GetLabels()[DatadogLocalStorageLabel]
return ok && value == "true"

labels := node.GetLabels()

_, newStorageOk := labels[DatadogLocalStorageProvisionerLabel]
value, ok := labels[DatadogLocalStorageLabel]

// the node should have either the local-storage or the local-storage-provisioner label
return (ok && value == "true") || newStorageOk
}

// SetNodeLocalDataResource updates a NodeInfo with the DatadogLocalDataResource resource
@@ -61,7 +81,28 @@ func SetNodeLocalDataResource(nodeInfo *schedulerframework.NodeInfo) {
if node.Status.Capacity == nil {
node.Status.Capacity = apiv1.ResourceList{}
}
node.Status.Capacity[DatadogLocalDataResource] = DatadogLocalDataQuantity.DeepCopy()
node.Status.Allocatable[DatadogLocalDataResource] = DatadogLocalDataQuantity.DeepCopy()

provisioner := node.Labels[DatadogLocalStorageProvisionerLabel]
switch provisioner {
case DatadogStorageProvisionerTopoLVM:
capacity := node.Labels[DatadogInitialStorageCapacityLabel]
capacityResource, err := resource.ParseQuantity(capacity)
if err == nil {
node.Status.Capacity[DatadogEphemeralLocalDataResource] = capacityResource.DeepCopy()
node.Status.Allocatable[DatadogEphemeralLocalDataResource] = capacityResource.DeepCopy()
} else {
klog.Warningf("failed to attach capacity information (%s) to node (%s): %v", capacity, node.Name, err)
}
default:
// The legacy local-storage provisioner uses a different label for identification.
// If the provisioner label does not match any known option, fall back to the old label and otherwise print a warning.
if val, ok := node.Labels[DatadogLocalStorageLabel]; ok && val == "true" {
node.Status.Capacity[DatadogLocalDataResource] = DatadogLocalDataQuantity.DeepCopy()
node.Status.Allocatable[DatadogLocalDataResource] = DatadogLocalDataQuantity.DeepCopy()
} else {
klog.Warningf("this should never be reached. local storage provisioner (%s) is unknown and cannot be used on node: %s", provisioner, node.Name)
}
}

nodeInfo.SetNode(node)
}
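
For illustration only, not part of the diff: a minimal sketch of how a topolvm node is expected to be labelled and how the initial-storage-capacity value is parsed into the storageclass/ephemeral-local-data virtual resource. The node labels follow the constants above; the 100Gi value is an arbitrary example.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical labels on a new node, using the label keys defined above.
	labels := map[string]string{
		"nodegroups.datadoghq.com/local-storage-provisioner": "topolvm",
		"nodegroups.datadoghq.com/initial-storage-capacity":  "100Gi",
	}

	// SetNodeLocalDataResource parses the capacity label the same way and, on
	// success, exposes the quantity as Capacity and Allocatable for the
	// storageclass/ephemeral-local-data resource on the node template.
	capacity, err := resource.ParseQuantity(labels["nodegroups.datadoghq.com/initial-storage-capacity"])
	if err != nil {
		fmt.Println("invalid capacity label:", err)
		return
	}
	fmt.Println(capacity.Value()) // 107374182400 (100 * 1024^3 bytes)
}

A node carrying only the legacy local-storage=true label keeps the old behaviour and gets the single-unit storageclass/local-data resource instead.
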
81 changes: 80 additions & 1 deletion cluster-autoscaler/processors/datadog/common/common_test.go
@@ -69,6 +69,15 @@ func TestNodeHasLocalData(t *testing.T) {
nil,
false,
},
{
"local-storage-provisioner label was set",
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{DatadogLocalStorageProvisionerLabel: "topolvm"},
},
},
true,
},
}

for _, tt := range tests {
@@ -87,7 +96,11 @@ func TestSetNodeLocalDataResource(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "egg"},
},
)
ni.SetNode(&corev1.Node{})
ni.SetNode(&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{DatadogLocalStorageLabel: "true"},
},
})

SetNodeLocalDataResource(ni)

@@ -99,5 +112,71 @@
assert.True(t, ok)
assert.Equal(t, niValue, int64(1))

// Only DatadogLocalDataResource should be set
_, ok = ni.Node().Status.Allocatable[DatadogEphemeralLocalDataResource]
assert.False(t, ok)

_, ok = ni.Allocatable.ScalarResources[DatadogEphemeralLocalDataResource]
assert.False(t, ok)

assert.Equal(t, len(ni.Pods), 2)
}

func TestSetNodeResourceFromTopolvm(t *testing.T) {
var hundredGB int64 = 100 * 1024 * 1024 * 1024
ni := schedulerframework.NewNodeInfo()
ni.SetNode(&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
DatadogLocalStorageProvisionerLabel: "topolvm",
DatadogInitialStorageCapacityLabel: "100Gi",
},
},
})

SetNodeLocalDataResource(ni)

nodeValue, ok := ni.Node().Status.Allocatable[DatadogEphemeralLocalDataResource]
assert.True(t, ok)
assert.Equal(t, nodeValue.String(), resource.NewQuantity(hundredGB, resource.BinarySI).String())

niValue, ok := ni.Allocatable.ScalarResources[DatadogEphemeralLocalDataResource]
assert.True(t, ok)
assert.Equal(t, niValue, hundredGB)

// Only DatadogEphemeralLocalDataResource should be set
_, ok = ni.Node().Status.Allocatable[DatadogLocalDataResource]
assert.False(t, ok)

_, ok = ni.Allocatable.ScalarResources[DatadogLocalDataResource]
assert.False(t, ok)
}

func TestShouldNotSetResourcesWithMissingLabel(t *testing.T) {
ni := schedulerframework.NewNodeInfo()
ni.SetNode(&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
DatadogLocalStorageProvisionerLabel: "topolvm",
},
},
})

SetNodeLocalDataResource(ni)

_, ok := ni.Node().Status.Allocatable[DatadogLocalDataResource]
assert.False(t, ok)
_, ok = ni.Node().Status.Capacity[DatadogLocalDataResource]
assert.False(t, ok)

_, ok = ni.Allocatable.ScalarResources[DatadogLocalDataResource]
assert.False(t, ok)

_, ok = ni.Node().Status.Allocatable[DatadogEphemeralLocalDataResource]
assert.False(t, ok)
_, ok = ni.Node().Status.Capacity[DatadogEphemeralLocalDataResource]
assert.False(t, ok)

_, ok = ni.Allocatable.ScalarResources[DatadogEphemeralLocalDataResource]
assert.False(t, ok)
}
62 changes: 51 additions & 11 deletions cluster-autoscaler/processors/datadog/pods/transform_local_data.go
@@ -56,6 +56,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/processors/datadog/common"

apiv1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
client "k8s.io/client-go/kubernetes"
@@ -64,6 +65,11 @@ import (
klog "k8s.io/klog/v2"
)

const (
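// storageClassNameLocal matches PVCs for local-data volumes handled by the legacy local-storage provisioner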
storageClassNameLocal = "local-data"
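// storageClassNameTopolvm matches ephemeral PVCs for local data provisioned by topolvm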
storageClassNameTopolvm = "ephemeral-local-data"
)

type transformLocalData struct {
pvcLister v1lister.PersistentVolumeClaimLister
stopChannel chan struct{}
@@ -90,19 +96,25 @@ func (p *transformLocalData) Process(ctx *context.AutoscalingContext, pods []*ap
for _, po := range pods {
var volumes []apiv1.Volume
for _, vol := range po.Spec.Volumes {
if vol.PersistentVolumeClaim == nil {
volumes = append(volumes, vol)
continue
}
pvc, err := p.pvcLister.PersistentVolumeClaims(po.Namespace).Get(vol.PersistentVolumeClaim.ClaimName)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Warningf("failed to fetch pvc for %s/%s: %v", po.GetNamespace(), po.GetName(), err)
var pvcSpec *apiv1.PersistentVolumeClaimSpec
if vol.PersistentVolumeClaim != nil {
pvc, err := p.pvcLister.PersistentVolumeClaims(po.Namespace).Get(vol.PersistentVolumeClaim.ClaimName)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Warningf("failed to fetch pvc for %s/%s: %v", po.GetNamespace(), po.GetName(), err)
}
volumes = append(volumes, vol)
continue
}
pvcSpec = &pvc.Spec
} else if vol.Ephemeral != nil {
pvcSpec = &vol.Ephemeral.VolumeClaimTemplate.Spec
} else {
volumes = append(volumes, vol)
continue
}
if *pvc.Spec.StorageClassName != "local-data" {

if !isSpecialPVCStorageClass(*pvcSpec.StorageClassName) {
volumes = append(volumes, vol)
continue
}
@@ -113,16 +125,44 @@
if len(po.Spec.Containers[0].Resources.Limits) == 0 {
po.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{}
}
if len(pvcSpec.Resources.Requests) == 0 {
pvcSpec.Resources.Requests = apiv1.ResourceList{}
}

po.Spec.Containers[0].Resources.Requests[common.DatadogLocalDataResource] = common.DatadogLocalDataQuantity.DeepCopy()
po.Spec.Containers[0].Resources.Limits[common.DatadogLocalDataResource] = common.DatadogLocalDataQuantity.DeepCopy()
switch *pvcSpec.StorageClassName {
case storageClassNameTopolvm:
if storage, ok := pvcSpec.Resources.Requests[corev1.ResourceStorage]; ok {
po.Spec.Containers[0].Resources.Requests[common.DatadogEphemeralLocalDataResource] = storage.DeepCopy()
po.Spec.Containers[0].Resources.Limits[common.DatadogEphemeralLocalDataResource] = storage.DeepCopy()
} else {
klog.Warningf("ignoring pvc as it does not have storage request information")
volumes = append(volumes, vol)
}
case storageClassNameLocal:
po.Spec.Containers[0].Resources.Requests[common.DatadogLocalDataResource] = common.DatadogLocalDataQuantity.DeepCopy()
po.Spec.Containers[0].Resources.Limits[common.DatadogLocalDataResource] = common.DatadogLocalDataQuantity.DeepCopy()
default:
klog.Warningf("this should never be reached. pvc storage class (%s) cannot be used for scaling on pod: %s", *pvcSpec.StorageClassName, po.Name)
volumes = append(volumes, vol)
}
}
po.Spec.Volumes = volumes
}

return pods, nil
}

func isSpecialPVCStorageClass(className string) bool {
switch className {
case storageClassNameTopolvm:
return true
case storageClassNameLocal:
return true
default:
return false
}
}

// NewPersistentVolumeClaimLister builds a persistentvolumeclaim lister.
func NewPersistentVolumeClaimLister(kubeClient client.Interface, stopchannel <-chan struct{}) v1lister.PersistentVolumeClaimLister {
listWatcher := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "persistentvolumeclaims", apiv1.NamespaceAll, fields.Everything())
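
For illustration only, not part of the change: a minimal sketch of the pod-side input that the new Process branch handles. It builds a generic ephemeral volume whose claim template uses the ephemeral-local-data storage class; Process copies the claim's storage request onto the pod's first container as the storageclass/ephemeral-local-data request and limit. The volume name and the 10Gi request are arbitrary examples.

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	className := "ephemeral-local-data"

	// Claim template spec as it would appear under vol.Ephemeral.VolumeClaimTemplate.Spec.
	spec := apiv1.PersistentVolumeClaimSpec{StorageClassName: &className}
	spec.Resources.Requests = apiv1.ResourceList{
		apiv1.ResourceStorage: resource.MustParse("10Gi"),
	}

	vol := apiv1.Volume{
		Name: "scratch",
		VolumeSource: apiv1.VolumeSource{
			Ephemeral: &apiv1.EphemeralVolumeSource{
				VolumeClaimTemplate: &apiv1.PersistentVolumeClaimTemplate{Spec: spec},
			},
		},
	}

	// Process reads the same fields: the storage class selects the branch and the
	// storage request becomes the virtual resource amount on the first container.
	pvcSpec := vol.Ephemeral.VolumeClaimTemplate.Spec
	storage := pvcSpec.Resources.Requests[apiv1.ResourceStorage]
	fmt.Println(*pvcSpec.StorageClassName, storage.String()) // ephemeral-local-data 10Gi
}

PVC-backed volumes with the local-data storage class keep the previous behaviour and are translated into the single-unit storageclass/local-data request.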