From aeba01968b15ecc63e9d477f102d9d62df550eb6 Mon Sep 17 00:00:00 2001
From: Shubham Gupta <69793468+shubham-cmyk@users.noreply.github.com>
Date: Fri, 29 Mar 2024 08:06:55 +0530
Subject: [PATCH] test(client): Add Redis Mock Test code (#842)

* test(client) : Add Redis Mock Test code

Signed-off-by: Shubham Gupta

* test(client): Add for followers node id's

Signed-off-by: Shubham Gupta

* test: cover checkAttachedSlave

Signed-off-by: Shubham Gupta

* test: Add redis server role

Signed-off-by: Shubham Gupta

* test: add redis operator pod logs

Signed-off-by: Shubham Gupta

* chore: add log statement

Signed-off-by: Shubham Gupta

* chore: fix logs

Signed-off-by: Shubham Gupta

* test: fix unit test

Signed-off-by: Shubham Gupta

* fix: return data

Signed-off-by: Shubham Gupta

* fix the role

Signed-off-by: Shubham Gupta

* chore: remove comment

Signed-off-by: Shubham Gupta

* refactor: move logger to params

Signed-off-by: Shubham Gupta

* test: Add test Get statefulset

Signed-off-by: Shubham Gupta

---------

Signed-off-by: Shubham Gupta
---
 controllers/rediscluster_controller.go     |   4 +-
 controllers/redisreplication_controller.go |   2 +-
 k8sutils/cluster-scaling.go                |  27 ++--
 k8sutils/cluster-scaling_test.go           | 166 +++++++++++++++++++++
 k8sutils/redis-cluster.go                  |   5 +-
 k8sutils/redis-replication.go              |  12 +-
 k8sutils/redis-sentinel.go                 |  15 +-
 k8sutils/redis-standalone.go               |  10 +-
 k8sutils/redis.go                          |  76 +++++-----
 k8sutils/redis_test.go                     | 149 ++++++++++++++++++
 k8sutils/statefulset.go                    |  15 +-
 k8sutils/statefulset_test.go               |  97 ++++++++++++
 12 files changed, 504 insertions(+), 74 deletions(-)

diff --git a/controllers/rediscluster_controller.go b/controllers/rediscluster_controller.go
index 3cc478f93..c540e47e4 100644
--- a/controllers/rediscluster_controller.go
+++ b/controllers/rediscluster_controller.go
@@ -132,7 +132,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		return ctrl.Result{}, err
 	}
 
-	redisLeaderInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name+"-leader", r.K8sClient)
+	redisLeaderInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-leader")
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return ctrl.Result{RequeueAfter: time.Second * 60}, nil
@@ -164,7 +164,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 			return ctrl.Result{}, err
 		}
 	}
-	redisFollowerInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name+"-follower", r.K8sClient)
+	redisFollowerInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-follower")
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return ctrl.Result{RequeueAfter: time.Second * 60}, nil
diff --git a/controllers/redisreplication_controller.go b/controllers/redisreplication_controller.go
index d6f9b85cb..a57308e80 100644
--- a/controllers/redisreplication_controller.go
+++ b/controllers/redisreplication_controller.go
@@ -66,7 +66,7 @@ func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Req
 
 	// Set Pod distruptiuon Budget Later
 
-	redisReplicationInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name, r.K8sClient)
+	redisReplicationInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName())
 	if err != nil {
 		return ctrl.Result{RequeueAfter: time.Second * 60}, err
 	}
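[Editor's note] Both controller hunks follow the refactor this patch applies everywhere: GetStatefulSet now takes the Kubernetes client and the reconciler's own logger as leading arguments instead of building a logger internally. That ordering is what the new unit tests exploit, since a fake clientset can stand in for the first argument. A minimal sketch of the idea, assuming the k8sutils package layout above (the fuller table-driven version appears in statefulset_test.go at the end of this patch):

package k8sutils

import (
	"testing"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sClientFake "k8s.io/client-go/kubernetes/fake"
)

// Sketch only: seed a fake clientset with a StatefulSet, then resolve it
// through the refactored signature; no live cluster involved.
func Test_getStatefulSetSketch(t *testing.T) {
	client := k8sClientFake.NewSimpleClientset(&appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-leader", Namespace: "demo-ns"},
	})
	sts, err := GetStatefulSet(client, logr.Discard(), "demo-ns", "demo-leader")
	if err != nil || sts.Name != "demo-leader" {
		t.Fatalf("unexpected result: %v, %v", sts, err)
	}
}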
diff --git a/k8sutils/cluster-scaling.go b/k8sutils/cluster-scaling.go
index f95163758..da68c44a8 100644
--- a/k8sutils/cluster-scaling.go
+++ b/k8sutils/cluster-scaling.go
@@ -17,6 +17,9 @@ import (
 // NOTE: when all slot been transferred, the node become slave of the first master node.
 func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) {
 	ctx := context.TODO()
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
+
 	var cmd []string
 	currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
@@ -62,7 +65,7 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re
 		cmd = append(cmd, transferNodeID)
 
 		// Cluster Slots
-		slot := getRedisClusterSlots(ctx, client, logger, cr, removeNodeID)
+		slot := getRedisClusterSlots(ctx, redisClient, logger, removeNodeID)
 		cmd = append(cmd, "--cluster-slots")
 		cmd = append(cmd, slot)
 
@@ -81,12 +84,9 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re
 	}
 }
 
-func getRedisClusterSlots(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeID string) string {
+func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string {
 	totalSlots := 0
 
-	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
-	defer redisClient.Close()
-
 	redisSlots, err := redisClient.ClusterSlots(ctx).Result()
 	if err != nil {
 		logger.Error(err, "Failed to Get Cluster Slots")
@@ -168,6 +168,8 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.
 func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
 	totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
 
 	for i := 0; i < int(totalRedisLeaderNodes); i++ {
 		pod := RedisDetails{
@@ -175,7 +177,7 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logge
 			Namespace: cr.Namespace,
 		}
 		podNodeID := getRedisNodeID(ctx, client, logger, cr, pod)
-		podSlots := getRedisClusterSlots(ctx, client, logger, cr, podNodeID)
+		podSlots := getRedisClusterSlots(ctx, redisClient, logger, podNodeID)
 
 		if podSlots == "0" || podSlots == "" {
 			logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod)
@@ -256,10 +258,7 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log
 }
 
 // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader
-func getAttachedFollowerNodeIDs(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, masterNodeID string) []string {
-	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
-	defer redisClient.Close()
-
+func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, logger logr.Logger, masterNodeID string) []string {
 	slaveIDs, err := redisClient.ClusterSlaves(ctx, masterNodeID).Result()
 	if err != nil {
 		logger.Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID)
@@ -272,6 +271,8 @@ func getAttachedFollowerNodeIDs(ctx context.Context, client kubernetes.Interface
 // Remove redis follower node would remove all follower nodes of last leader node using redis-cli
 func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
 	var cmd []string
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
 
 	currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 	existingPod := RedisDetails{
@@ -296,7 +297,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.
 	cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)
 
 	lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod)
-	followerNodeIDs := getAttachedFollowerNodeIDs(ctx, client, logger, cr, lastLeaderPodNodeID)
+	followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, logger, lastLeaderPodNodeID)
 
 	cmd = append(cmd, "--cluster", "del-node")
 	if *cr.Spec.ClusterVersion == "v7" {
@@ -316,6 +317,8 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.
 // Remove redis cluster node would remove last node to the existing redis cluster using redis-cli
 func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) {
 	var cmd []string
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
 	// currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
 	existingPod := RedisDetails{
@@ -350,7 +353,7 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface
 	cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)
 	logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd)
 
-	if getRedisClusterSlots(ctx, client, logger, cr, removePodNodeID) != "0" {
+	if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" {
 		logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd)
 	}
 	executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0")
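[Editor's note] The common thread in these cluster-scaling.go hunks: helpers that previously dialed the leader pod themselves (via configureRedisClient) now accept an injected *redis.Client, and each exported operation opens one connection up front and closes it once with defer. That makes the helpers unit-testable with redismock and avoids re-dialing Redis for every lookup. A hypothetical helper showing the resulting call shape (describeLeader is illustrative only, not part of the patch):

package k8sutils

import (
	"context"

	"github.com/go-logr/logr"
	redis "github.com/redis/go-redis/v9"
)

// describeLeader is a hypothetical example: both lookups share the injected
// client, so a redismock client can be substituted in tests.
func describeLeader(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) (string, []string) {
	slots := getRedisClusterSlots(ctx, redisClient, logger, nodeID)           // slots owned, returned as a string
	followers := getAttachedFollowerNodeIDs(ctx, redisClient, logger, nodeID) // IDs of attached replicas
	return slots, followers
}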
diff --git a/k8sutils/cluster-scaling_test.go b/k8sutils/cluster-scaling_test.go
index ca8107712..fe1bd3bd0 100644
--- a/k8sutils/cluster-scaling_test.go
+++ b/k8sutils/cluster-scaling_test.go
@@ -2,6 +2,7 @@ package k8sutils
 
 import (
 	"context"
+	"fmt"
 	"testing"
 
 	"github.com/go-logr/logr"
@@ -61,3 +62,167 @@ func Test_verifyLeaderPodInfo(t *testing.T) {
 		})
 	}
 }
+
+func Test_getRedisClusterSlots(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name            string
+		nodeID          string
+		clusterSlots    []redis.ClusterSlot
+		clusterSlotsErr error
+		expectedResult  string
+	}{
+		{
+			name:   "successful slot count",
+			nodeID: "node123",
+			clusterSlots: []redis.ClusterSlot{
+				{Start: 0, End: 4999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+				{Start: 5000, End: 9999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+			},
+			expectedResult: "10000",
+		},
+		{
+			name:            "error fetching cluster slots",
+			nodeID:          "node123",
+			clusterSlotsErr: redis.ErrClosed,
+			expectedResult:  "",
+		},
+		{
+			name:   "no slots for node",
+			nodeID: "node999",
+			clusterSlots: []redis.ClusterSlot{
+				{Start: 0, End: 4999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+			},
+			expectedResult: "0",
+		},
+		{
+			name:   "slots for multiple nodes",
+			nodeID: "node123",
+			clusterSlots: []redis.ClusterSlot{
+				{Start: 0, End: 1999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+				{Start: 2000, End: 3999, Nodes: []redis.ClusterNode{{ID: "node456"}}},
+				{Start: 4000, End: 5999, Nodes: []redis.ClusterNode{{ID: "node123"}, {ID: "node789"}}},
+			},
+			expectedResult: "4000",
+		},
+		{
+			name:   "single slot range",
+			nodeID: "node123",
+			clusterSlots: []redis.ClusterSlot{
+				{Start: 100, End: 100, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+			},
+			expectedResult: "1",
+		},
+		{
+			name:   "mixed slot ranges",
+			nodeID: "node123",
+			clusterSlots: []redis.ClusterSlot{
+				{Start: 0, End: 499, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+				{Start: 500, End: 999, Nodes: []redis.ClusterNode{{ID: "node123"}, {ID: "node999"}}},
+				{Start: 1000, End: 1499, Nodes: []redis.ClusterNode{{ID: "node999"}}},
+				{Start: 1500, End: 1999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
+			},
+			expectedResult: "1500",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.TODO()
+			client, mock := redismock.NewClientMock()
+
+			if tt.clusterSlotsErr != nil {
+				mock.ExpectClusterSlots().SetErr(tt.clusterSlotsErr)
+			} else {
+				mock.ExpectClusterSlots().SetVal(tt.clusterSlots)
+			}
+
+			result := getRedisClusterSlots(ctx, client, logger, tt.nodeID)
+
+			assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name)
+
+			if err := mock.ExpectationsWereMet(); err != nil {
+				t.Errorf("there were unmet expectations: %s", err)
+			}
+		})
+	}
+}
+
+func Test_getAttachedFollowerNodeIDs(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name                 string
+		masterNodeID         string
+		slaveNodeIDs         []string
+		clusterSlavesErr     error
+		expectedslaveNodeIDs []string
+	}{
+		{
+			name:                 "successful retrieval of slave nodes",
+			masterNodeID:         "master123",
+			slaveNodeIDs:         []string{"slave1", "slave2"},
+			expectedslaveNodeIDs: []string{"slave1", "slave2"},
+		},
+		{
+			name:                 "error fetching slave nodes",
+			masterNodeID:         "master123",
+			clusterSlavesErr:     redis.ErrClosed,
+			expectedslaveNodeIDs: nil,
+		},
+		{
+			name:                 "no attached slave nodes",
+			masterNodeID:         "master456",
+			slaveNodeIDs:         []string{},
+			expectedslaveNodeIDs: []string{},
+		},
+		{
+			name:                 "nil response for slave nodes",
+			masterNodeID:         "masterNode123",
+			slaveNodeIDs:         nil,
+			expectedslaveNodeIDs: nil,
+			clusterSlavesErr:     nil,
+		},
+		{
+			name:                 "large number of attached slave nodes",
+			masterNodeID:         "master123",
+			slaveNodeIDs:         generateLargeListOfSlaves(1000), // Helper function needed
+			expectedslaveNodeIDs: generateLargeListOfSlaves(1000),
+		},
+		{
+			name:                 "invalid master node ID",
+			masterNodeID:         "invalidMasterID",
+			slaveNodeIDs:         nil,
+			expectedslaveNodeIDs: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.Background()
+			client, mock := redismock.NewClientMock()
+
+			if tt.clusterSlavesErr != nil {
+				mock.ExpectClusterSlaves(tt.masterNodeID).SetErr(tt.clusterSlavesErr)
+			} else {
+				mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs)
+			}
+
+			result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID)
+
+			assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name)
+
+			if err := mock.ExpectationsWereMet(); err != nil {
+				t.Errorf("there were unmet expectations: %s", err)
+			}
+		})
+	}
+}
+
+func generateLargeListOfSlaves(n int) []string {
+	var slaves []string
+	for i := 0; i < n; i++ {
+		slaves = append(slaves, fmt.Sprintf("slaveNode%d", i))
+	}
+	return slaves
+}
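[Editor's note] One caveat for anyone extending these tables: assuming redismock's default in-order expectation matching (its v9 API exposes a MatchExpectationsInOrder toggle), a test that triggers two Redis calls must queue two expectations, one per call. A sketch:

package k8sutils

import (
	"context"
	"testing"

	"github.com/go-logr/logr"
	"github.com/go-redis/redismock/v9"
	redis "github.com/redis/go-redis/v9"
)

// Sketch: two CLUSTER SLOTS calls require two queued expectations.
func Test_twoSlotLookupsSketch(t *testing.T) {
	client, mock := redismock.NewClientMock()
	mock.ExpectClusterSlots().SetVal([]redis.ClusterSlot{})
	mock.ExpectClusterSlots().SetVal([]redis.ClusterSlot{})

	logger := logr.Discard()
	_ = getRedisClusterSlots(context.TODO(), client, logger, "node123")
	_ = getRedisClusterSlots(context.TODO(), client, logger, "node123")

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("unmet expectations: %s", err)
	}
}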
range", + nodeID: "node123", + clusterSlots: []redis.ClusterSlot{ + {Start: 100, End: 100, Nodes: []redis.ClusterNode{{ID: "node123"}}}, + }, + expectedResult: "1", + }, + { + name: "mixed slot ranges", + nodeID: "node123", + clusterSlots: []redis.ClusterSlot{ + {Start: 0, End: 499, Nodes: []redis.ClusterNode{{ID: "node123"}}}, + {Start: 500, End: 999, Nodes: []redis.ClusterNode{{ID: "node123"}, {ID: "node999"}}}, + {Start: 1000, End: 1499, Nodes: []redis.ClusterNode{{ID: "node999"}}}, + {Start: 1500, End: 1999, Nodes: []redis.ClusterNode{{ID: "node123"}}}, + }, + expectedResult: "1500", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.TODO() + client, mock := redismock.NewClientMock() + + if tt.clusterSlotsErr != nil { + mock.ExpectClusterSlots().SetErr(tt.clusterSlotsErr) + } else { + mock.ExpectClusterSlots().SetVal(tt.clusterSlots) + } + + result := getRedisClusterSlots(ctx, client, logger, tt.nodeID) + + assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unmet expectations: %s", err) + } + }) + } +} + +func Test_getAttachedFollowerNodeIDs(t *testing.T) { + logger := logr.Discard() + + tests := []struct { + name string + masterNodeID string + slaveNodeIDs []string + clusterSlavesErr error + expectedslaveNodeIDs []string + }{ + { + name: "successful retrieval of slave nodes", + masterNodeID: "master123", + slaveNodeIDs: []string{"slave1", "slave2"}, + expectedslaveNodeIDs: []string{"slave1", "slave2"}, + }, + { + name: "error fetching slave nodes", + masterNodeID: "master123", + clusterSlavesErr: redis.ErrClosed, + expectedslaveNodeIDs: nil, + }, + { + name: "no attached slave nodes", + masterNodeID: "master456", + slaveNodeIDs: []string{}, + expectedslaveNodeIDs: []string{}, + }, + { + name: "nil response for slave nodes", + masterNodeID: "masterNode123", + slaveNodeIDs: nil, + expectedslaveNodeIDs: nil, + clusterSlavesErr: nil, + }, + { + name: "large number of attached slave nodes", + masterNodeID: "master123", + slaveNodeIDs: generateLargeListOfSlaves(1000), // Helper function needed + expectedslaveNodeIDs: generateLargeListOfSlaves(1000), + }, + { + name: "invalid master node ID", + masterNodeID: "invalidMasterID", + slaveNodeIDs: nil, + expectedslaveNodeIDs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + client, mock := redismock.NewClientMock() + + if tt.clusterSlavesErr != nil { + mock.ExpectClusterSlaves(tt.masterNodeID).SetErr(tt.clusterSlavesErr) + } else { + mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs) + } + + result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID) + + assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unmet expectations: %s", err) + } + }) + } +} + +func generateLargeListOfSlaves(n int) []string { + var slaves []string + for i := 0; i < n; i++ { + slaves = append(slaves, fmt.Sprintf("slaveNode%d", i)) + } + return slaves +} diff --git a/k8sutils/redis-cluster.go b/k8sutils/redis-cluster.go index aae4cf5bf..f3dc5e973 100644 --- a/k8sutils/redis-cluster.go +++ b/k8sutils/redis-cluster.go @@ -270,14 +270,15 @@ func (service RedisClusterSTS) CreateRedisClusterSetup(cr *redisv1beta2.RedisClu annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := 
diff --git a/k8sutils/redis-replication.go b/k8sutils/redis-replication.go
index 4a780de06..967c0925d 100644
--- a/k8sutils/redis-replication.go
+++ b/k8sutils/redis-replication.go
@@ -61,14 +61,16 @@ func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Int
 	labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels)
 	annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
 	objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
-	err := CreateOrUpdateStateFul(cr.Namespace,
+	err := CreateOrUpdateStateFul(
+		cl,
+		logger,
+		cr.GetNamespace(),
 		objectMetaInfo,
 		generateRedisReplicationParams(cr),
 		redisReplicationAsOwner(cr),
 		generateRedisReplicationInitContainerParams(cr),
 		generateRedisReplicationContainerParams(cr),
 		cr.Spec.Sidecars,
-		cl,
 	)
 	if err != nil {
 		logger.Error(err, "Cannot create replication statefulset for Redis")
@@ -199,9 +201,9 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati
 	return initcontainerProp
 }
 
-func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, ki kubernetes.Interface, di dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool {
+func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool {
 	// statefulset name the same as the redis replication name
-	sts, err := GetStatefulSet(rs.Namespace, rs.Spec.RedisSentinelConfig.RedisReplicationName, ki)
+	sts, err := GetStatefulSet(client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName)
 	if err != nil {
 		return false
 	}
@@ -211,7 +213,7 @@ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, ki kuberne
 	// Enhanced check: When the pod is ready, it may not have been
 	// created as part of a replication cluster, so we should verify
 	// whether there is an actual master node.
-	if master := getRedisReplicationMasterIP(ctx, ki, logger, rs, di); master == "" {
+	if master := getRedisReplicationMasterIP(ctx, client, logger, rs, dClient); master == "" {
 		return false
 	}
 	return true
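[Editor's note] Besides renaming the terse ki/di parameters to client/dClient, the hunk shows that readiness now requires both conditions the comment describes: the StatefulSet's pods Ready, and a resolvable master. Paraphrased as a standalone predicate (a sketch of the shape of the checks, with the master lookup reduced to a string for illustration; not the literal function body):

package k8sutils

// replicationReadySketch paraphrases the checks IsRedisReplicationReady
// performs after this patch; masterIP stands in for the
// getRedisReplicationMasterIP lookup.
func replicationReadySketch(readyReplicas, wantReplicas int32, masterIP string) bool {
	if readyReplicas < wantReplicas {
		return false // some pods are not Ready yet
	}
	if masterIP == "" {
		return false // pods are Ready, but no node holds the master role yet
	}
	return true
}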
diff --git a/k8sutils/redis-sentinel.go b/k8sutils/redis-sentinel.go
index ad0c754b6..2c0bf8605 100644
--- a/k8sutils/redis-sentinel.go
+++ b/k8sutils/redis-sentinel.go
@@ -68,14 +68,15 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl
 	annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
 	objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
 	err := CreateOrUpdateStateFul(
-		cr.Namespace,
+		cl,
+		logger,
+		cr.GetNamespace(),
 		objectMetaInfo,
 		generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity),
 		redisSentinelAsOwner(cr),
 		generateRedisSentinelInitContainerParams(cr),
 		generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl),
 		cr.Spec.Sidecars,
-		cl,
 	)
 	if err != nil {
 		logger.Error(err, "Cannot create Sentinel statefulset for Redis")
@@ -324,7 +325,15 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac
 	} else if len(masterPods) == 1 {
 		realMasterPod = masterPods[0]
 	} else {
-		realMasterPod = checkAttachedSlave(ctx, client, logger, &replicationInstance, masterPods)
+		for _, podName := range masterPods {
+			redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName)
+			defer redisClient.Close()
+
+			if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 {
+				realMasterPod = podName
+				break
+			}
+		}
 	}
 
 	realMasterInfo := RedisDetails{
diff --git a/k8sutils/redis-standalone.go b/k8sutils/redis-standalone.go
index a99a45b92..5aae864c8 100644
--- a/k8sutils/redis-standalone.go
+++ b/k8sutils/redis-standalone.go
@@ -7,10 +7,6 @@ import (
 	"k8s.io/utils/pointer"
 )
 
-//var (
-//	enableMetrics bool
-//)
-
 // CreateStandaloneService method will create standalone service for Redis
 func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) error {
 	logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name)
@@ -60,14 +56,16 @@ func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) erro
 	labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels)
 	annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
 	objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations)
-	err := CreateOrUpdateStateFul(cr.Namespace,
+	err := CreateOrUpdateStateFul(
+		cl,
+		logger,
+		cr.GetNamespace(),
 		objectMetaInfo,
 		generateRedisStandaloneParams(cr),
 		redisAsOwner(cr),
 		generateRedisStandaloneInitContainerParams(cr),
 		generateRedisStandaloneContainerParams(cr),
 		cr.Spec.Sidecars,
-		cl,
 	)
 	if err != nil {
 		logger.Error(err, "Cannot create standalone statefulset for Redis")
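[Editor's note] One detail worth flagging in the new master-election loop in getRedisReplicationMasterIP (and the similar loop added to CreateMasterSlaveReplication in redis.go below): defer inside a for loop runs only when the enclosing function returns, so every candidate's connection stays open until then. That is harmless for a handful of pods; if the candidate list could grow, scoping each close to its own iteration is the usual fix. A sketch of that variant, reusing the patch's identifiers:

// Sketch: close each candidate's connection at the end of its own iteration.
for _, podName := range masterPods {
	found := func() bool {
		redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName)
		defer redisClient.Close() // runs when this closure returns, i.e. per iteration
		return checkAttachedSlave(ctx, redisClient, logger, podName) > 0
	}()
	if found {
		realMasterPod = podName
		break
	}
}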
diff --git a/k8sutils/redis.go b/k8sutils/redis.go
index ee0db0a17..b7f74ef05 100644
--- a/k8sutils/redis.go
+++ b/k8sutils/redis.go
@@ -488,7 +488,7 @@ func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Lo
 
 // Get Redis nodes by it's role i.e. master, slave and sentinel
 func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string {
-	statefulset, err := GetStatefulSet(cr.Namespace, cr.Name, cl)
+	statefulset, err := GetStatefulSet(cl, logger, cr.GetNamespace(), cr.GetName())
 	if err != nil {
 		logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace)
 	}
@@ -498,7 +498,9 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo
 
 	for i := 0; i < int(replicas); i++ {
 		podName := statefulset.Name + "-" + strconv.Itoa(i)
-		podRole := checkRedisServerRole(ctx, cl, logger, cr, podName)
+		redisClient := configureRedisReplicationClient(cl, logger, cr, podName)
+		defer redisClient.Close()
+		podRole := checkRedisServerRole(ctx, redisClient, logger, podName)
 		if podRole == redisRole {
 			pods = append(pods, podName)
 		}
@@ -508,57 +510,63 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo
 }
 
 // Check the Redis Server Role i.e. master, slave and sentinel
-func checkRedisServerRole(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) string {
-	redisClient := configureRedisReplicationClient(client, logger, cr, podName)
-	defer redisClient.Close()
-	info, err := redisClient.Info(ctx, "replication").Result()
+func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) string {
+	info, err := redisClient.Info(ctx, "Replication").Result()
 	if err != nil {
 		logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName)
+		return ""
 	}
-
 	lines := strings.Split(info, "\r\n")
-	role := ""
 	for _, line := range lines {
 		if strings.HasPrefix(line, "role:") {
-			role = strings.TrimPrefix(line, "role:")
-			break
+			role := strings.TrimPrefix(line, "role:")
+			logger.V(1).Info("Role of the Redis Pod", "pod", podName, "role", role)
+			return role
 		}
 	}
-
-	return role
+	logger.Error(err, "Failed to find role from Info # Replication in", "redis pod", podName)
+	return ""
 }
 
 // checkAttachedSlave would return redis pod name which has slave
-func checkAttachedSlave(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string {
-	for _, podName := range masterPods {
-		connected_slaves := ""
-		redisClient := configureRedisReplicationClient(client, logger, cr, podName)
-		defer redisClient.Close()
-		info, err := redisClient.Info(ctx, "replication").Result()
-		if err != nil {
-			logger.Error(err, "Failed to Get the connected slaves Info of the", "redis pod", podName)
-		}
-
-		lines := strings.Split(info, "\r\n")
+func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) int {
+	info, err := redisClient.Info(ctx, "Replication").Result()
+	if err != nil {
+		logger.Error(err, "Failed to get the connected slaves count of the", "redis pod", podName)
+		return -1 // return -1 if failed to get the connected slaves count
+	}
 
-		for _, line := range lines {
-			if strings.HasPrefix(line, "connected_slaves:") {
-				connected_slaves = strings.TrimPrefix(line, "connected_slaves:")
-				break
+	lines := strings.Split(info, "\r\n")
+	for _, line := range lines {
+		if strings.HasPrefix(line, "connected_slaves:") {
+			var connected_slaves int
+			connected_slaves, err = strconv.Atoi(strings.TrimPrefix(line, "connected_slaves:"))
+			if err != nil {
+				logger.Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName)
+				return -1
 			}
-		}
-
-		nums, _ := strconv.Atoi(connected_slaves)
-		if nums > 0 {
-			return podName
+			logger.V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves)
+			return connected_slaves
 		}
 	}
-	return ""
+
+	logger.Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName)
+	return 0
 }
 
 func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string, slavePods []string) error {
 	var realMasterPod string
-	realMasterPod = checkAttachedSlave(ctx, client, logger, cr, masterPods)
+
+	for _, podName := range masterPods {
+		redisClient := configureRedisReplicationClient(client, logger, cr, podName)
+		defer redisClient.Close()
+
+		if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 {
+			realMasterPod = podName
+			break
+		}
+	}
+	// realMasterPod = checkAttachedSlave(ctx, client, logger, cr, masterPods)
 
 	if len(slavePods) < 1 {
 		realMasterPod = masterPods[0]
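[Editor's note] Both rewritten helpers scan the INFO Replication payload the same way: split on CRLF, find a "key:" prefix, take the remainder. Note the distinct sentinels: checkAttachedSlave returns -1 when the INFO call itself fails and 0 when the field is absent, while checkRedisServerRole returns "" in both cases. If a third field is ever needed, the shared scan could be factored out, e.g. (fieldFromInfo is hypothetical, not in the patch):

package k8sutils

import "strings"

// fieldFromInfo is a hypothetical helper: INFO sections are CRLF-separated
// "key:value" lines, so extracting one field is a prefix scan.
func fieldFromInfo(info, field string) (string, bool) {
	for _, line := range strings.Split(info, "\r\n") {
		if strings.HasPrefix(line, field+":") {
			return strings.TrimPrefix(line, field+":"), true
		}
	}
	return "", false
}

With it, checkRedisServerRole reduces to fieldFromInfo(info, "role") and checkAttachedSlave to a strconv.Atoi over fieldFromInfo(info, "connected_slaves").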
diff --git a/k8sutils/redis_test.go b/k8sutils/redis_test.go
index 732654592..a1dc9d961 100644
--- a/k8sutils/redis_test.go
+++ b/k8sutils/redis_test.go
@@ -1,6 +1,7 @@
 package k8sutils
 
 import (
+	"context"
 	"encoding/csv"
 	"fmt"
 	"strings"
@@ -8,7 +9,10 @@ import (
 
 	redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
 	mock_utils "github.com/OT-CONTAINER-KIT/redis-operator/mocks/utils"
+	"github.com/go-logr/logr"
 	"github.com/go-logr/logr/testr"
+	"github.com/go-redis/redismock/v9"
+	redis "github.com/redis/go-redis/v9"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -512,3 +516,148 @@ func TestGetContainerID(t *testing.T) {
 		})
 	}
 }
+
+func Test_checkAttachedSlave(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name               string
+		podName            string
+		infoReturn         string
+		infoErr            error
+		expectedSlaveCount int
+	}{
+		{
+			name:    "no attached slaves",
+			podName: "pod1",
+			infoReturn: "# Replication\r\n" +
+				"role:master\r\n" +
+				"connected_slaves:0\r\n" +
+				"master_failover_state:no-failover\r\n" +
+				"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
+				"master_replid2:0000000000000000000000000000000000000000\r\n" +
+				"master_repl_offset:0\r\n" +
+				"second_repl_offset:-1\r\n" +
+				"repl_backlog_active:0\r\n" +
+				"repl_backlog_size:1048576\r\n" +
+				"repl_backlog_first_byte_offset:0\r\n" +
+				"repl_backlog_histlen:0\r\n",
+			expectedSlaveCount: 0,
+		},
+		{
+			name:    "two attached slaves",
+			podName: "pod2",
+			infoReturn: "# Replication\r\n" +
+				"role:master\r\n" +
+				"connected_slaves:2\r\n" +
+				"master_failover_state:no-failover\r\n" +
+				"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
+				"master_replid2:0000000000000000000000000000000000000000\r\n" +
+				"master_repl_offset:0\r\n" +
+				"second_repl_offset:-1\r\n" +
+				"repl_backlog_active:0\r\n" +
+				"repl_backlog_size:1048576\r\n" +
+				"repl_backlog_first_byte_offset:0\r\n" +
+				"repl_backlog_histlen:0\r\n",
+			expectedSlaveCount: 2,
+		},
+		{
+			name:               "error fetching slave info",
+			podName:            "pod3",
+			infoReturn:         "",
+			infoErr:            redis.ErrClosed,
+			expectedSlaveCount: -1,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.TODO()
+			client, mock := redismock.NewClientMock()
+
+			if tt.infoErr != nil {
+				mock.ExpectInfo("Replication").SetErr(tt.infoErr)
+			} else {
+				mock.ExpectInfo("Replication").SetVal(tt.infoReturn)
+			}
+
+			slaveCount := checkAttachedSlave(ctx, client, logger, tt.podName)
+			assert.Equal(t, tt.expectedSlaveCount, slaveCount, "Test case: "+tt.name)
+			if err := mock.ExpectationsWereMet(); err != nil {
+				t.Errorf("there were unmet expectations: %s", err)
+			}
+		})
+	}
+}
+
+func Test_checkRedisServerRole(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name           string
+		podName        string
+		infoReturn     string
+		infoErr        error
+		expectedResult string
+	}{
+		{
+			name:    "redis master role",
+			podName: "pod1",
+			infoReturn: "# Replication\r\n" +
+				"role:master\r\n" +
+				"connected_slaves:0\r\n" +
+				"master_failover_state:no-failover\r\n" +
+				"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
+				"master_replid2:0000000000000000000000000000000000000000\r\n" +
+				"master_repl_offset:0\r\n" +
+				"second_repl_offset:-1\r\n" +
+				"repl_backlog_active:0\r\n" +
+				"repl_backlog_size:1048576\r\n" +
+				"repl_backlog_first_byte_offset:0\r\n" +
+				"repl_backlog_histlen:0\r\n",
+			expectedResult: "master",
+		},
+		{
+			name:    "redis slave role",
+			podName: "pod2",
+			infoReturn: "# Replication\r\n" +
+				"role:slave\r\n" +
+				"connected_slaves:0\r\n" +
+				"master_failover_state:no-failover\r\n" +
+				"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
+				"master_replid2:0000000000000000000000000000000000000000\r\n" +
+				"master_repl_offset:0\r\n" +
+				"second_repl_offset:-1\r\n" +
+				"repl_backlog_active:0\r\n" +
+				"repl_backlog_size:1048576\r\n" +
+				"repl_backlog_first_byte_offset:0\r\n" +
+				"repl_backlog_histlen:0\r\n",
+			expectedResult: "slave",
+		},
+		{
+			name:           "error fetching role info",
+			podName:        "pod3",
+			infoErr:        redis.ErrClosed,
+			expectedResult: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.TODO()
+			client, mock := redismock.NewClientMock()
+
+			if tt.infoErr != nil {
+				mock.ExpectInfo("Replication").SetErr(tt.infoErr)
+			} else {
+				mock.ExpectInfo("Replication").SetVal(tt.infoReturn)
+			}
+
+			role := checkRedisServerRole(ctx, client, logger, tt.podName)
+			assert.Equal(t, tt.expectedResult, role, "Test case: "+tt.name)
+			if err := mock.ExpectationsWereMet(); err != nil {
+				t.Errorf("there were unmet expectations: %s", err)
+			}
+		})
+	}
+}
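[Editor's note] The two INFO-driven tests repeat a near-identical replication payload. If more cases accumulate, a small builder would keep the tables compact; a hypothetical helper (not in the patch) carrying only the two fields the parsers actually read:

package k8sutils

import "fmt"

// replicationInfo assembles a minimal INFO Replication payload. The full
// payloads in the tests above work too; role and connected_slaves are the
// only fields the code under test consumes.
func replicationInfo(role string, connectedSlaves int) string {
	return fmt.Sprintf("# Replication\r\nrole:%s\r\nconnected_slaves:%d\r\n", role, connectedSlaves)
}

A table entry would then read, for example, infoReturn: replicationInfo("master", 2).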
diff --git a/k8sutils/statefulset.go b/k8sutils/statefulset.go
index b732a3afc..5492b7216 100644
--- a/k8sutils/statefulset.go
+++ b/k8sutils/statefulset.go
@@ -95,9 +95,8 @@ type initContainerParameters struct {
 }
 
 // CreateOrUpdateStateFul method will create or update Redis service
-func CreateOrUpdateStateFul(namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar, cl kubernetes.Interface) error {
-	logger := statefulSetLogger(namespace, stsMeta.Name)
-	storedStateful, err := GetStatefulSet(namespace, stsMeta.Name, cl)
+func CreateOrUpdateStateFul(cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error {
+	storedStateful, err := GetStatefulSet(cl, logger, namespace, stsMeta.Name)
 	statefulSetDef := generateStatefulSetsDef(stsMeta, params, ownerDef, initcontainerParams, containerParams, getSidecars(sidecars))
 	if err != nil {
 		if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(statefulSetDef); err != nil { //nolint
@@ -105,7 +104,7 @@ func CreateOrUpdateStateFul(namespace string, stsMeta metav1.ObjectMeta, params
 			return err
 		}
 		if apierrors.IsNotFound(err) {
-			return createStatefulSet(namespace, statefulSetDef, cl)
+			return createStatefulSet(cl, logger, namespace, statefulSetDef)
 		}
 		return err
 	}
@@ -688,8 +687,7 @@ func getEnvironmentVariables(role string, enabledPassword *bool, secretName *str
 }
 
 // createStatefulSet is a method to create statefulset in Kubernetes
-func createStatefulSet(namespace string, stateful *appsv1.StatefulSet, cl kubernetes.Interface) error {
-	logger := statefulSetLogger(stateful.Namespace, stateful.Name)
+func createStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error {
 	_, err := cl.AppsV1().StatefulSets(namespace).Create(context.TODO(), stateful, metav1.CreateOptions{})
 	if err != nil {
 		logger.Error(err, "Redis stateful creation failed")
@@ -726,12 +724,11 @@ func updateStatefulSet(namespace string, stateful *appsv1.StatefulSet, recreateS
 }
 
 // GetStateFulSet is a method to get statefulset in Kubernetes
-func GetStatefulSet(namespace string, stateful string, cl kubernetes.Interface) (*appsv1.StatefulSet, error) {
-	logger := statefulSetLogger(namespace, stateful)
+func GetStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) {
 	getOpts := metav1.GetOptions{
 		TypeMeta: generateMetaInformation("StatefulSet", "apps/v1"),
 	}
-	statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), stateful, getOpts)
+	statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, getOpts)
 	if err != nil {
 		logger.V(1).Info("Redis statefulset get action failed")
 		return nil, err
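[Editor's note] With the logger and client passed in, GetStatefulSet and createStatefulSet can be driven entirely by the fake clientset. The Test_createStatefulSet table that follows relies on one property of the fake worth spelling out: creating an object that was already seeded at construction time fails with an AlreadyExists error, which is why its "present" case expects a non-nil error. In miniature (a sketch, assuming package-internal access to createStatefulSet):

package k8sutils

import (
	"testing"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sClientFake "k8s.io/client-go/kubernetes/fake"
)

// Sketch: the fake clientset enforces uniqueness, so re-creating a seeded
// StatefulSet surfaces an AlreadyExists error.
func Test_createDuplicateSketch(t *testing.T) {
	sts := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "ns"}}
	client := k8sClientFake.NewSimpleClientset(sts.DeepCopy())
	if err := createStatefulSet(client, logr.Discard(), "ns", sts); err == nil {
		t.Fatal("expected AlreadyExists error")
	}
}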
diff --git a/k8sutils/statefulset_test.go b/k8sutils/statefulset_test.go
index 879c3242e..bdc8d430d 100644
--- a/k8sutils/statefulset_test.go
+++ b/k8sutils/statefulset_test.go
@@ -6,8 +6,12 @@ import (
 
 	common "github.com/OT-CONTAINER-KIT/redis-operator/api"
 	redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sClientFake "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/utils/pointer"
 )
 
@@ -186,6 +190,99 @@ func TestGetVolumeMount(t *testing.T) {
 	}
 }
 
+func Test_GetStatefulSet(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name         string
+		sts          appsv1.StatefulSet
+		stsName      string
+		stsNamespace string
+		present      bool
+	}{
+		{
+			name: "StatefulSet present",
+			sts: appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-sts",
+					Namespace: "test-ns",
+				},
+			},
+			stsName:      "test-sts",
+			stsNamespace: "test-ns",
+			present:      true,
+		},
+		{
+			name:         "StatefulSet not present",
+			sts:          appsv1.StatefulSet{},
+			stsName:      "test-sts",
+			stsNamespace: "test-ns",
+			present:      false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			client := k8sClientFake.NewSimpleClientset(test.sts.DeepCopy())
+			_, err := GetStatefulSet(client, logger, test.stsNamespace, test.stsName)
+			if test.present {
+				assert.Nil(t, err)
+			} else {
+				assert.NotNil(t, err)
+			}
+		})
+	}
+}
+
+func Test_createStatefulSet(t *testing.T) {
+	logger := logr.Discard()
+
+	tests := []struct {
+		name    string
+		sts     appsv1.StatefulSet
+		present bool
+	}{
+		{
+			name: "StatefulSet present",
+			sts: appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-sts",
+					Namespace: "test-ns",
+				},
+			},
+			present: true,
+		},
+		{
+			name: "StatefulSet not present",
+			sts: appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-sts",
+					Namespace: "test-ns",
+				},
+			},
+			present: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var client *k8sClientFake.Clientset
+			if test.present {
+				client = k8sClientFake.NewSimpleClientset(test.sts.DeepCopy())
+			} else {
+				client = k8sClientFake.NewSimpleClientset()
+			}
+			err := createStatefulSet(client, logger, test.sts.GetNamespace(), &test.sts)
+			if test.present {
+				assert.NotNil(t, err)
+			} else {
+				assert.Nil(t, err)
+			}
+		})
+	}
+}
+
 func TestGenerateTLSEnvironmentVariables(t *testing.T) {
 	tlsConfig := &redisv1beta2.TLSConfig{
 		TLSConfig: common.TLSConfig{