refactor: rename the relevant service name and statefulset name
Signed-off-by: Phoeniix Zhao <[email protected]>
Phoenix500526 committed Feb 5, 2024
1 parent ac5306f commit 85799a7
Showing 4 changed files with 25 additions and 62 deletions.
12 changes: 5 additions & 7 deletions internal/reconciler/cluster_sync.go
@@ -2,7 +2,6 @@ package reconciler
 
 import (
 	xapi "github.com/xline-kv/xline-operator/api/v1alpha1"
-	tran "github.com/xline-kv/xline-operator/internal/transformer"
 	appv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 )
@@ -16,25 +15,24 @@ func (r *XlineClusterReconciler) Sync() (xapi.XlineClusterSyncStatus, error) {
 
 // sync XlineCluster status
 func (r *XlineClusterReconciler) syncXlineStatus(xlineStatus *xapi.XlineClusterSyncStatus) error {
-	svcRef := tran.GetServiceKey(r.CR.ObjKey())
+	xcLookupKey := r.CR.ObjKey()
 	svc := &corev1.Service{}
-	exist, err := r.Exist(svcRef, svc)
+	exist, err := r.Exist(xcLookupKey, svc)
 	if err != nil {
 		return err
 	}
 	if exist {
-		xlineStatus.ServiceRef = xapi.NewNamespacedName(svcRef)
+		xlineStatus.ServiceRef = xapi.NewNamespacedName(xcLookupKey)
 	}
 
-	stsRef := tran.GetStatefulSetKey(r.CR.ObjKey())
 	sts := &appv1.StatefulSet{}
-	exist, err = r.Exist(stsRef, sts)
+	exist, err = r.Exist(xcLookupKey, sts)
 	if err != nil {
 		return err
 	}
 	if exist {
 		xlineStatus.Image = *r.CR.Spec.Image
-		xlineStatus.StatefulSetRef = xapi.NewNamespacedName(stsRef)
+		xlineStatus.StatefulSetRef = xapi.NewNamespacedName(xcLookupKey)
 		xlineStatus.Conditions = sts.Status.Conditions
 	}
 
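For reference, a minimal sketch of the lookup pattern this file converges on: both the Service and the StatefulSet are now fetched with the XlineCluster's own namespaced name. The controller-runtime client wiring and the exist helper below are stand-ins for illustration, not the operator's actual Exist implementation.

package sketch

import (
	"context"

	appv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// exist reports whether the object identified by key is present in the cluster.
// It stands in for the reconciler's Exist helper seen in the diff.
func exist(ctx context.Context, c client.Client, key types.NamespacedName, obj client.Object) (bool, error) {
	if err := c.Get(ctx, key, obj); err != nil {
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// syncStatus sketches the single-key lookup: after the rename, the same key
// (the CR's namespace/name) locates both the Service and the StatefulSet.
func syncStatus(ctx context.Context, c client.Client, key types.NamespacedName) error {
	svc := &corev1.Service{}
	if found, err := exist(ctx, c, key, svc); err != nil {
		return err
	} else if found {
		_ = svc // record Service-derived status here
	}
	sts := &appv1.StatefulSet{}
	if found, err := exist(ctx, c, key, sts); err != nil {
		return err
	} else if found {
		_ = sts // record StatefulSet-derived status here
	}
	return nil
}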
36 changes: 10 additions & 26 deletions internal/transformer/xlinecluster_resource.go
@@ -19,29 +19,16 @@ const (
 	DataDir = "/usr/local/xline/data-dir"
 )
 
-func GetServiceKey(xlineClusterName types.NamespacedName) types.NamespacedName {
-	return types.NamespacedName{
-		Namespace: xlineClusterName.Namespace,
-		Name: fmt.Sprintf("%s-svc", xlineClusterName.Name),
-	}
-}
-
-func GetStatefulSetKey(xlineClusterName types.NamespacedName) types.NamespacedName {
-	return types.NamespacedName{
-		Namespace: xlineClusterName.Namespace,
-		Name: fmt.Sprintf("%s-sts", xlineClusterName.Name),
-	}
-}
-
 func GetXlineInstanceLabels(xlineClusterName types.NamespacedName) map[string]string {
 	return MakeResourceLabels(xlineClusterName.Name)
 }
 
-func GetMemberTopology(stsRef types.NamespacedName, svcName string, replicas int) string {
+func GetMemberTopology(cr *xapi.XlineCluster) string {
+	replicas := int(cr.Spec.Replicas)
 	members := make([]string, replicas)
 	for i := 0; i < replicas; i++ {
-		podName := fmt.Sprintf("%s-%d", stsRef.Name, i)
-		dnsName := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podName, svcName, stsRef.Namespace)
+		podName := fmt.Sprintf("%s-%d", cr.Name, i)
+		dnsName := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podName, cr.Name, cr.Namespace)
 		members[i] = fmt.Sprintf("%s=%s:%d", podName, dnsName, XlinePort)
 	}
 	return strings.Join(members, ",")
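To make the effect of the rename concrete, here is a small standalone sketch mirroring the new GetMemberTopology logic: pods and the governing Service both reuse the bare cluster name, so every member address has the shape <name>-<i>.<name>.<namespace>.svc.cluster.local. The cluster name, namespace, and port below are assumptions for illustration; in the operator the port comes from the XlinePort constant.

package main

import (
	"fmt"
	"strings"
)

// memberTopology mirrors the renamed naming scheme used by GetMemberTopology.
func memberTopology(name, namespace string, replicas, port int) string {
	members := make([]string, replicas)
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%s-%d", name, i)
		dnsName := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podName, name, namespace)
		members[i] = fmt.Sprintf("%s=%s:%d", podName, dnsName, port)
	}
	return strings.Join(members, ",")
}

func main() {
	// Prints, for example:
	// my-xline-cluster-0=my-xline-cluster-0.my-xline-cluster.default.svc.cluster.local:2379,...
	fmt.Println(memberTopology("my-xline-cluster", "default", 3, 2379))
}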
Expand Down Expand Up @@ -80,12 +67,11 @@ func GetAuthSecretEnvVars(auth_sec *xapi.XlineAuthSecret) []corev1.EnvVar {
}

func MakeService(cr *xapi.XlineCluster, scheme *runtime.Scheme) *corev1.Service {
svcRef := GetServiceKey(cr.ObjKey())
svcLabel := GetXlineInstanceLabels(cr.ObjKey())
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcRef.Name,
Namespace: svcRef.Namespace,
Name: cr.Name,
Namespace: cr.Namespace,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -104,9 +90,7 @@
 
 func MakeStatefulSet(cr *xapi.XlineCluster, scheme *runtime.Scheme) *appv1.StatefulSet {
 	crName := types.NamespacedName{Namespace: cr.Namespace, Name: cr.Name}
-	stsRef := GetStatefulSetKey(crName)
 	stsLabels := GetXlineInstanceLabels(crName)
-	svcName := GetServiceKey(cr.ObjKey()).Name
 
 	initCmd := []string{
 		"xline",
@@ -118,7 +102,7 @@ func MakeStatefulSet(cr *xapi.XlineCluster, scheme *runtime.Scheme) *appv1.StatefulSet {
 	initCmd = append(initCmd, cr.Spec.BootArgs()...)
 
 	envs := []corev1.EnvVar{
-		{Name: "MEMBERS", Value: GetMemberTopology(stsRef, svcName, int(cr.Spec.Replicas))},
+		{Name: "MEMBERS", Value: GetMemberTopology(cr)},
 		{Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{
 			FieldRef: &corev1.ObjectFieldSelector{
 				FieldPath: "metadata.name",
@@ -164,13 +148,13 @@ func MakeStatefulSet(cr *xapi.XlineCluster, scheme *runtime.Scheme) *appv1.StatefulSet {
 	// statefulset
 	statefulSet := &appv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: stsRef.Name,
-			Namespace: stsRef.Namespace,
+			Name: cr.Name,
+			Namespace: cr.Namespace,
 			Labels: stsLabels,
 		},
 		Spec: appv1.StatefulSetSpec{
 			Replicas: &cr.Spec.Replicas,
-			ServiceName: svcName,
+			ServiceName: cr.Name,
 			Selector: &metav1.LabelSelector{MatchLabels: stsLabels},
 			VolumeClaimTemplates: pvcTemplates,
 			Template: podTemplate,
19 changes: 1 addition & 18 deletions internal/transformer/xlinecluster_resource_test.go
@@ -23,30 +23,13 @@ func TestXlineClusterFunc(t *testing.T) {
 		},
 	}
 
-	t.Run("GetServiceKey should work properly", func(t *testing.T) {
-		xcLookupKey := xlineCluster.ObjKey()
-		svcObj := GetServiceKey(xcLookupKey)
-		assert.Equal(t, svcObj.Namespace, "default")
-		assert.Equal(t, svcObj.Name, "xline-svc")
-	})
-
-	t.Run("GetStatefulSetKey should work properly", func(t *testing.T) {
-		xcLookupKey := xlineCluster.ObjKey()
-		stsObj := GetStatefulSetKey(xcLookupKey)
-		assert.Equal(t, stsObj.Namespace, "default")
-		assert.Equal(t, stsObj.Name, "xline-sts")
-	})
-
 	t.Run("GetXlineImage should work properly", func(t *testing.T) {
 		xlineImage := *xlineCluster.Spec.Image
 		assert.Equal(t, xlineImage, "xline-img:latest")
 	})
 
 	t.Run("GetMemberTopology should work properly", func(t *testing.T) {
-		xcLookupKey := xlineCluster.ObjKey()
-		stsRef := GetStatefulSetKey(xcLookupKey)
-		svcName := GetServiceKey(xcLookupKey).Name
-		topology := GetMemberTopology(stsRef, svcName, 3)
+		topology := GetMemberTopology(&xlineCluster)
 		topologyVec := strings.Split(topology, ",")
 		assert.Equal(t, len(topologyVec), 3)
 		for i := 0; i < 3; i++ {
20 changes: 9 additions & 11 deletions tests/e2e/cases/ci.sh
@@ -4,8 +4,6 @@ source "${E2E_TEST_DIR}/common/common.sh"
 source "${E2E_TEST_DIR}/testenv/testenv.sh"
 
 _TEST_CI_CLUSTER_NAME="my-xline-cluster"
-_TEST_CI_STS_NAME="$_TEST_CI_CLUSTER_NAME-sts"
-_TEST_CI_SVC_NAME="$_TEST_CI_CLUSTER_NAME-svc"
 _TEST_CI_SECRET_NAME="auth-cred"
 _TEST_CI_NAMESPACE="default"
 _TEST_CI_DNS_SUFFIX="svc.cluster.local"
@@ -14,9 +12,9 @@ _TEST_CI_STORAGECLASS_NAME="e2e-storage"
 _TEST_CI_LOG_SYNC_TIMEOUT=30
 
 function test::ci::_mk_endpoints() {
-    local endpoints="${_TEST_CI_STS_NAME}-0.${_TEST_CI_SVC_NAME}.${_TEST_CI_NAMESPACE}.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}"
+    local endpoints="${_TEST_CI_CLUSTER_NAME}-0.${_TEST_CI_CLUSTER_NAME}.${_TEST_CI_NAMESPACE}.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}"
     for ((i = 1; i < $1; i++)); do
-        endpoints="${endpoints},${_TEST_CI_STS_NAME}-${i}.${_TEST_CI_SVC_NAME}.${_TEST_CI_NAMESPACE}.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}"
+        endpoints="${endpoints},${_TEST_CI_CLUSTER_NAME}-${i}.${_TEST_CI_CLUSTER_NAME}.${_TEST_CI_NAMESPACE}.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}"
     done
     echo "$endpoints"
 }
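As a worked example of the renamed scheme, a 3-node call to this helper now yields my-xline-cluster-0.my-xline-cluster.default.svc.cluster.local:${_TEST_CI_XLINE_PORT}, then the -1 and -2 entries, comma-separated; this is the same <pod>.<service>.<namespace> shape that GetMemberTopology produces above. (_TEST_CI_XLINE_PORT is defined elsewhere in the script and its value is not shown in this diff.)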
@@ -140,17 +138,17 @@ function test::ci::_uninstall_CRD() {
 
 function test::ci::wait_all_xline_pod_ready() {
     for ((i = 0; i < $1; i++)); do
-        log::info "wait pod/${_TEST_CI_STS_NAME}-${i} to be ready"
-        if ! k8s::kubectl wait --for=condition=Ready pod/${_TEST_CI_STS_NAME}-${i} --timeout=300s; then
+        log::info "wait pod/${_TEST_CI_CLUSTER_NAME}-${i} to be ready"
+        if ! k8s::kubectl wait --for=condition=Ready pod/${_TEST_CI_CLUSTER_NAME}-${i} --timeout=300s; then
             log::fatal "Failed to wait for util to be ready"
         fi
     done
 }
 
 function test::ci::wait_all_xline_pod_deleted() {
     for ((i = 0; i < $1; i++)); do
-        log::info "wait pod/${_TEST_CI_STS_NAME}-${i} to be ready"
-        if ! k8s::kubectl wait --for=delete pod/${_TEST_CI_STS_NAME}-${i} --timeout=300s; then
+        log::info "wait pod/${_TEST_CI_CLUSTER_NAME}-${i} to be ready"
+        if ! k8s::kubectl wait --for=delete pod/${_TEST_CI_CLUSTER_NAME}-${i} --timeout=300s; then
             log::fatal "Failed to wait for util to be ready"
         fi
     done
@@ -168,7 +166,7 @@ function test::ci::_prepare_pv() {
 
 function test::ci::_clean_pvc() {
     for ((i = 0; i < $1; i++)); do
-        local pvc_name="xline-storage-${_TEST_CI_STS_NAME}-${i}"
+        local pvc_name="xline-storage-${_TEST_CI_CLUSTER_NAME}-${i}"
         log::info "deleting pvc $pvc_name ..."
         k8s::kubectl delete pvc $pvc_name >/dev/null 2>&1
         if ! k8s::kubectl wait --for=delete pvc/${pvc_name} --timeout=300s; then
@@ -200,7 +198,7 @@ function test::ci::_start() {
     test::ci::_prepare_pv
     log::info "starting xline cluster..."
     k8s::kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/manifests/cluster.yaml" >/dev/null 2>&1
-    k8s::kubectl::wait_resource_creation sts $_TEST_CI_STS_NAME
+    k8s::kubectl::wait_resource_creation sts $_TEST_CI_CLUSTER_NAME
 }
 
 
@@ -231,7 +229,7 @@ function test::ci::_chaos() {
     kill=$((RANDOM % fault_tolerance + 1))
     log::info "chaos: kill=$kill"
     for ((j = 0; j < $kill; j++)); do
-        pod="${_TEST_CI_STS_NAME}-$((RANDOM % size))"
+        pod="${_TEST_CI_CLUSTER_NAME}-$((RANDOM % size))"
         log::info "chaos: kill pod=$pod"
         k8s::kubectl delete pod "$pod" --force --grace-period=0 2>/dev/null
     done