Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Configure New HAProxy to Direct Traffic to Redis with Role: Slave #37

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions operator/redisfailover/ensurer.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ import (

redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1"
"github.com/spotahome/redis-operator/metrics"
"github.com/spotahome/redis-operator/operator/redisfailover/util"

rfservice "github.com/spotahome/redis-operator/operator/redisfailover/service"
)

// Ensure is called to ensure all of the resources associated with a RedisFailover are created
Expand All @@ -29,6 +32,8 @@ func (w *RedisFailoverHandler) Ensure(rf *redisfailoverv1.RedisFailover, labels
}

if rf.Spec.Haproxy != nil {
labels = util.MergeLabels(labels, rfservice.GetHAProxyRedisLabels())

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This type of thing usually happens down in the generators, no?


if err := w.rfService.EnsureHAProxyService(rf, labels, or); err != nil {
return err
}
Expand Down Expand Up @@ -86,5 +91,21 @@ func (w *RedisFailoverHandler) Ensure(rf *redisfailoverv1.RedisFailover, labels
}
}

if rf.Spec.BootstrapNode != nil {
labels = util.MergeLabels(labels, rfservice.GetHAProxyRedisSlaveLabels())

if err := w.rfService.EnsureHAProxyService(rf, labels, or); err != nil {
return err
}

if err := w.rfService.EnsureHAProxyConfigmap(rf, labels, or); err != nil {
return err
}

if err := w.rfService.EnsureHAProxyDeployment(rf, labels, or); err != nil {
return err
}
}

return nil
}
4 changes: 4 additions & 0 deletions operator/redisfailover/ensurer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,10 @@ func TestEnsure(t *testing.T) {
mrfs.On("EnsureRedisReadinessConfigMap", rf, mock.Anything, mock.Anything).Once().Return(nil)
mrfs.On("EnsureRedisStatefulset", rf, mock.Anything, mock.Anything).Once().Return(nil)

mrfs.On("EnsureHAProxyService", rf, mock.Anything, mock.Anything).Once().Return(nil)
mrfs.On("EnsureHAProxyConfigmap", rf, mock.Anything, mock.Anything).Once().Return(nil)
mrfs.On("EnsureHAProxyDeployment", rf, mock.Anything, mock.Anything).Once().Return(nil)

// Create the Kops client and call the valid logic.
handler := rfOperator.NewRedisFailoverHandler(config, mrfs, mrfc, mrfh, mk, metrics.Dummy, log.Dummy)
err := handler.Ensure(rf, map[string]string{}, []metav1.OwnerReference{}, metrics.Dummy)
Expand Down
51 changes: 39 additions & 12 deletions operator/redisfailover/service/generator.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,11 +50,32 @@ sentinel parallel-syncs mymaster 2`
graceTime = 30
)

const redisHAProxyName = "redis-haproxy"
const (
rfLabelInstance = "app.kubernetes.io/instance"
redisHAProxyName = "redis-haproxy"
redisSlaveHAProxyName = "redis-s-haproxy"
Comment on lines +55 to +56

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Had you considered putting these in the constants with the other resource naming conventions therein?

Given how the operator names its other resources, I'd expect the full generated names of the HAProxy bits to be something like:
Redis Failover Redis Master HAproxy

rfrmh-[redis-failover-name] 

Redis Failover Redis Slave HAproxy

rfrsh-[redis-failover-name]

)

var (
haproxyRedisSlaveLabels = map[string]string{
rfLabelInstance: redisSlaveHAProxyName,

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would the existing "redisfailovers.databases.spotahome.com/component" and "redisfailovers.databases.spotahome.com/redisfailovers-role" labels not be enough and/or appropriate here?

AFAIK, the semantics of the app.kubernetes.io/instance label are that it's supposed to be unique to the running resource:

Description: A unique name identifying the instance of an application
Example: mysql-abcxzy
-- https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/#labels

}
haproxyRedisLabels = map[string]string{
rfLabelInstance: redisHAProxyName,
}
)

// GetHAProxyRedisLabels returns the selector labels identifying the default
// (master-facing) HAProxy resources. A fresh map is returned on every call so
// callers can merge or mutate the result without corrupting the package-level
// defaults.
func GetHAProxyRedisLabels() map[string]string {
	labels := make(map[string]string, len(haproxyRedisLabels))
	for k, v := range haproxyRedisLabels {
		labels[k] = v
	}
	return labels
}

// GetHAProxyRedisSlaveLabels returns the selector labels identifying the
// slave-facing HAProxy resources. A fresh map is returned on every call so
// callers can merge or mutate the result without corrupting the package-level
// defaults.
func GetHAProxyRedisSlaveLabels() map[string]string {
	labels := make(map[string]string, len(haproxyRedisSlaveLabels))
	for k, v := range haproxyRedisSlaveLabels {
		labels[k] = v
	}
	return labels
}
Comment on lines +68 to +74

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a reason we can't reuse the existing generateRedisMasterRoleLabel and generateRedisSlaveRoleLabel functions for this?

func generateRedisMasterRoleLabel() map[string]string {
return map[string]string{
redisRoleLabelKey: redisRoleLabelMaster,
}
}
func generateRedisSlaveRoleLabel() map[string]string {
return map[string]string{
redisRoleLabelKey: redisRoleLabelSlave,
}
}


func generateHAProxyDeployment(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *appsv1.Deployment {

name := rf.GenerateName(redisHAProxyName)
name := rf.GenerateName(labels[rfLabelInstance])

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Any reason we don't follow the convention for generating resource names established in names.go here?

// GetRedisName returns the name for redis resources
func GetRedisName(rf *redisfailoverv1.RedisFailover) string {
return generateName(redisName, rf.Name)
}

Had we considered adding a name generators for the HAProxy bits?


namespace := rf.Namespace

Expand Down Expand Up @@ -131,7 +152,7 @@ func generateHAProxyDeployment(rf *redisfailoverv1.RedisFailover, labels map[str
}

func generateHAProxyConfigmap(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.ConfigMap {
name := rf.GenerateName(redisHAProxyName)
name := rf.GenerateName(labels[rfLabelInstance])
redisName := rf.GenerateName("redis")

namespace := rf.Namespace
Expand All @@ -140,6 +161,11 @@ func generateHAProxyConfigmap(rf *redisfailoverv1.RedisFailover, labels map[stri
"app.kubernetes.io/component": "redis",
})

redisRole := "role:master"
if labels[rfLabelInstance] == redisSlaveHAProxyName {
redisRole = "role:slave"
}
Comment on lines +164 to +167

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see a lot of duplicate branching and inferring based on "role" in the HAProxy resource generators - implicitly in the deployment, and again explicitly in the service generator.

Had you considered exposing explicit functions for creating the master and slave HAProxy bits and leaving it up to the caller to decide which one they want; similar to the pattern used by the existing Master and Slave service generators?

func generateRedisMasterService(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.Service {

func generateRedisSlaveService(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.Service {


port := rf.Spec.Redis.Port
haproxyCfg := fmt.Sprintf(`global
daemon
Expand Down Expand Up @@ -173,20 +199,20 @@ func generateHAProxyConfigmap(rf *redisfailoverv1.RedisFailover, labels map[stri
hold valid 10s
hold obsolete 10s

frontend redis-master
frontend redis-instance
bind *:%d
default_backend redis-master
default_backend redis-instance

backend redis-master
backend redis-instance
mode tcp
balance first
option tcp-check
tcp-check send info\ replication\r\n
tcp-check expect string role:master
tcp-check expect string %s
server-template redis %d _redis._tcp.%s.%s.svc.cluster.local:%d check inter 1s resolvers k8s init-addr none
`, port, rf.Spec.Redis.Replicas, redisName, namespace, port)
`, port, redisRole, rf.Spec.Redis.Replicas, redisName, namespace, port)

if rf.Spec.Haproxy.CustomConfig != "" {
if rf.Spec.Haproxy.CustomConfig != "" && labels[rfLabelInstance] != redisSlaveHAProxyName {
haproxyCfg = rf.Spec.Haproxy.CustomConfig
}

Expand Down Expand Up @@ -239,9 +265,10 @@ func generateRedisHeadlessService(rf *redisfailoverv1.RedisFailover, labels map[

func generateHAProxyService(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.Service {
name := rf.Spec.Haproxy.RedisHost
if name == "" {
name = redisHAProxyName
if name == "" || labels[rfLabelInstance] == redisSlaveHAProxyName {
name = rf.GenerateName(labels[rfLabelInstance])
}

namespace := rf.Namespace
redisTargetPort := intstr.FromInt(int(rf.Spec.Redis.Port))
selectorLabels := map[string]string{
Expand All @@ -255,7 +282,7 @@ func generateHAProxyService(rf *redisfailoverv1.RedisFailover, labels map[string
Type: "ClusterIP",
Ports: []corev1.ServicePort{
{
Name: "redis-master",
Name: "redis-instance",
Port: rf.Spec.Redis.Port.ToInt32(),
TargetPort: redisTargetPort,
Protocol: "TCP",
Expand Down
49 changes: 47 additions & 2 deletions operator/redisfailover/service/generator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1295,8 +1295,8 @@ func TestRedisService(t *testing.T) {
}

func TestHaproxyService(t *testing.T) {
haproxyName := "redis-haproxy"
portName := "redis-master"
haproxyName := "redis-haproxy-test"
portName := "redis-instance"
defaultRedisPort := redisfailoverv1.Port(6379)
customClusterIP := "10.1.1.100"
tests := []struct {
Expand All @@ -1312,6 +1312,7 @@ func TestHaproxyService(t *testing.T) {
{
name: "with defaults",
rfRedisPort: defaultRedisPort,
rfLabels: rfservice.GetHAProxyRedisLabels(),
expectedService: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: haproxyName,
Expand All @@ -1321,12 +1322,16 @@ func TestHaproxyService(t *testing.T) {
Name: "testing",
},
},
Labels: map[string]string{
"app.kubernetes.io/instance": "redis-haproxy",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{
"app.kubernetes.io/component": "redis",
"redisfailovers.databases.spotahome.com/component": "haproxy",
"app.kubernetes.io/instance": "redis-haproxy",
},
Ports: []corev1.ServicePort{
{
Expand All @@ -1342,6 +1347,7 @@ func TestHaproxyService(t *testing.T) {
{
name: "with custom ClusterIP",
rfRedisPort: defaultRedisPort,
rfLabels: rfservice.GetHAProxyRedisLabels(),
haproxy: redisfailoverv1.HaproxySettings{
Service: &redisfailoverv1.ServiceSettings{
ClusterIP: customClusterIP,
Expand All @@ -1356,13 +1362,52 @@ func TestHaproxyService(t *testing.T) {
Name: "testing",
},
},
Labels: map[string]string{
"app.kubernetes.io/instance": "redis-haproxy",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: customClusterIP,
Selector: map[string]string{
"app.kubernetes.io/component": "redis",
"redisfailovers.databases.spotahome.com/component": "haproxy",
"app.kubernetes.io/instance": "redis-haproxy",
},
Ports: []corev1.ServicePort{
{
Name: portName,
Port: defaultRedisPort.ToInt32(),
TargetPort: intstr.FromInt(int(defaultRedisPort)),
Protocol: corev1.ProtocolTCP,
},
},
},
},
},
{
name: "with HAProxy Redis Slave Labels",
rfRedisPort: defaultRedisPort,
rfLabels: rfservice.GetHAProxyRedisSlaveLabels(),
expectedService: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-s-haproxy-test",
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
{
Name: "testing",
},
},
Labels: map[string]string{
"app.kubernetes.io/instance": "redis-s-haproxy",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{
"app.kubernetes.io/component": "redis",

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Low priority context query.

I think this is a "preexisting condition" but any idea why HAProxy components of the RedisFailover would receive the app.kubernetes.io/component label value of "redis" instead of something like "proxy" or "redis-proxy"?

"redisfailovers.databases.spotahome.com/component": "haproxy",
"app.kubernetes.io/instance": "redis-s-haproxy",
},
Ports: []corev1.ServicePort{
{
Expand Down