test(client): Add Redis Mock Test code (#842)
* test(client): Add Redis Mock Test code

Signed-off-by: Shubham Gupta <[email protected]>

* test(client): Add test for follower node IDs

Signed-off-by: Shubham Gupta <[email protected]>

* test: cover checkAttachedSlave

Signed-off-by: Shubham Gupta <[email protected]>

* test: Add redis server role

Signed-off-by: Shubham Gupta <[email protected]>

* test: add redis operator pod logs

Signed-off-by: Shubham Gupta <[email protected]>

* chore: add log statement

Signed-off-by: Shubham Gupta <[email protected]>

* chore: fix logs

Signed-off-by: Shubham Gupta <[email protected]>

* test: fix unit test

Signed-off-by: Shubham Gupta <[email protected]>

* fix: return data

Signed-off-by: Shubham Gupta <[email protected]>

* fix the role

Signed-off-by: Shubham Gupta <[email protected]>

* chore: remove comment

Signed-off-by: Shubham Gupta <[email protected]>

* refactor: move logger to params

Signed-off-by: Shubham Gupta <[email protected]>

* test: Add test Get statefulset

Signed-off-by: Shubham Gupta <[email protected]>

---------

Signed-off-by: Shubham Gupta <[email protected]>
shubham-cmyk authored Mar 29, 2024
1 parent a6d372d commit aeba019
Showing 12 changed files with 504 additions and 74 deletions.
4 changes: 2 additions & 2 deletions controllers/rediscluster_controller.go
@@ -132,7 +132,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, err
}

- redisLeaderInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name+"-leader", r.K8sClient)
+ redisLeaderInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-leader")
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{RequeueAfter: time.Second * 60}, nil
@@ -164,7 +164,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, err
}
}
- redisFollowerInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name+"-follower", r.K8sClient)
+ redisFollowerInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-follower")
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{RequeueAfter: time.Second * 60}, nil
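The GetStatefulSet call sites above reflect a signature reorder: the Kubernetes client and logger now come first, followed by namespace and name. A minimal sketch of the reworked helper, assuming the standard client-go AppsV1 getter; the upstream body may wrap errors or log differently:

```go
package k8sutils

import (
	"context"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// GetStatefulSet fetches a StatefulSet by namespace and name, with the
// client and logger injected first. Sketch only, not the upstream body.
func GetStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace, name string) (*appsv1.StatefulSet, error) {
	sts, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		logger.Error(err, "Failed to get StatefulSet", "namespace", namespace, "name", name)
		return nil, err
	}
	return sts, nil
}
```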
2 changes: 1 addition & 1 deletion controllers/redisreplication_controller.go
@@ -66,7 +66,7 @@ func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Req

// Set Pod distruptiuon Budget Later

- redisReplicationInfo, err := k8sutils.GetStatefulSet(instance.Namespace, instance.ObjectMeta.Name, r.K8sClient)
+ redisReplicationInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName())
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 60}, err
}
27 changes: 15 additions & 12 deletions k8sutils/cluster-scaling.go
@@ -17,6 +17,9 @@ import (
// NOTE: when all slot been transferred, the node become slave of the first master node.
func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) {
ctx := context.TODO()
+ redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+ defer redisClient.Close()

var cmd []string
currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")

@@ -62,7 +65,7 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re
cmd = append(cmd, transferNodeID)

// Cluster Slots
- slot := getRedisClusterSlots(ctx, client, logger, cr, removeNodeID)
+ slot := getRedisClusterSlots(ctx, redisClient, logger, removeNodeID)
cmd = append(cmd, "--cluster-slots")
cmd = append(cmd, slot)

@@ -81,12 +84,9 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re
}
}

- func getRedisClusterSlots(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeID string) string {
+ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string {
totalSlots := 0

- redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
- defer redisClient.Close()

redisSlots, err := redisClient.ClusterSlots(ctx).Result()
if err != nil {
logger.Error(err, "Failed to Get Cluster Slots")
@@ -168,14 +168,16 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.

func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
+ redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+ defer redisClient.Close()

for i := 0; i < int(totalRedisLeaderNodes); i++ {
pod := RedisDetails{
PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(i),
Namespace: cr.Namespace,
}
podNodeID := getRedisNodeID(ctx, client, logger, cr, pod)
- podSlots := getRedisClusterSlots(ctx, client, logger, cr, podNodeID)
+ podSlots := getRedisClusterSlots(ctx, redisClient, logger, podNodeID)

if podSlots == "0" || podSlots == "" {
logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod)
@@ -256,10 +258,7 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log
}

// getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader
- func getAttachedFollowerNodeIDs(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, masterNodeID string) []string {
- redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
- defer redisClient.Close()
-
+ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, logger logr.Logger, masterNodeID string) []string {
slaveIDs, err := redisClient.ClusterSlaves(ctx, masterNodeID).Result()
if err != nil {
logger.Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID)
@@ -272,6 +271,8 @@ func getAttachedFollowerNodeIDs(ctx context.Context, client kubernetes.Interface
// Remove redis follower node would remove all follower nodes of last leader node using redis-cli
func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
var cmd []string
+ redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+ defer redisClient.Close()
currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")

existingPod := RedisDetails{
@@ -296,7 +297,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.
cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)

lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod)
- followerNodeIDs := getAttachedFollowerNodeIDs(ctx, client, logger, cr, lastLeaderPodNodeID)
+ followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, logger, lastLeaderPodNodeID)

cmd = append(cmd, "--cluster", "del-node")
if *cr.Spec.ClusterVersion == "v7" {
@@ -316,6 +317,8 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.
// Remove redis cluster node would remove last node to the existing redis cluster using redis-cli
func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) {
var cmd []string
+ redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+ defer redisClient.Close()
// currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")

existingPod := RedisDetails{
@@ -350,7 +353,7 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface
cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)

logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd)
- if getRedisClusterSlots(ctx, client, logger, cr, removePodNodeID) != "0" {
+ if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" {
logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd)
}
executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0")
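The thread running through this file: helpers that talk to Redis (getRedisClusterSlots, getAttachedFollowerNodeIDs) now accept an injected *redis.Client instead of building one internally from the Kubernetes client and CR, so each exported entry point constructs the client once, defers its Close, and tests can substitute a mock. A sketch of the slot-counting helper, reconstructed to be consistent with the expectations in cluster-scaling_test.go below; slot ranges are inclusive, so each range contributes End - Start + 1 slots (the go-redis import path is assumed):

```go
package k8sutils

import (
	"context"
	"strconv"

	"github.com/go-logr/logr"
	redis "github.com/redis/go-redis/v9"
)

// getRedisClusterSlots sums the hash slots owned by nodeID. Reconstructed
// from the test expectations: e.g. ranges 0-4999 and 5000-9999 yield
// "10000", and an error from CLUSTER SLOTS yields "".
func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string {
	totalSlots := 0
	redisSlots, err := redisClient.ClusterSlots(ctx).Result()
	if err != nil {
		logger.Error(err, "Failed to Get Cluster Slots")
		return ""
	}
	for _, slot := range redisSlots {
		for _, node := range slot.Nodes {
			if node.ID == nodeID {
				// Ranges are inclusive on both ends.
				totalSlots += slot.End - slot.Start + 1
				break
			}
		}
	}
	return strconv.Itoa(totalSlots)
}
```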
166 changes: 166 additions & 0 deletions k8sutils/cluster-scaling_test.go
@@ -2,6 +2,7 @@ package k8sutils

import (
"context"
"fmt"
"testing"

"github.com/go-logr/logr"
@@ -61,3 +62,168 @@ func Test_verifyLeaderPodInfo(t *testing.T) {
})
}
}

func Test_getRedisClusterSlots(t *testing.T) {
logger := logr.Discard()

tests := []struct {
name string
nodeID string
clusterSlots []redis.ClusterSlot
clusterSlotsErr error
expectedResult string
}{
{
name: "successful slot count",
nodeID: "node123",
clusterSlots: []redis.ClusterSlot{
{Start: 0, End: 4999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
{Start: 5000, End: 9999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
},
expectedResult: "10000",
},
{
name: "error fetching cluster slots",
nodeID: "node123",
clusterSlotsErr: redis.ErrClosed,
expectedResult: "",
},
{
name: "no slots for node",
nodeID: "node999",
clusterSlots: []redis.ClusterSlot{
{Start: 0, End: 4999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
},
expectedResult: "0",
},
{
name: "slots for multiple nodes",
nodeID: "node123",
clusterSlots: []redis.ClusterSlot{
{Start: 0, End: 1999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
{Start: 2000, End: 3999, Nodes: []redis.ClusterNode{{ID: "node456"}}},
{Start: 4000, End: 5999, Nodes: []redis.ClusterNode{{ID: "node123"}, {ID: "node789"}}},
},
expectedResult: "4000",
},
{
name: "single slot range",
nodeID: "node123",
clusterSlots: []redis.ClusterSlot{
{Start: 100, End: 100, Nodes: []redis.ClusterNode{{ID: "node123"}}},
},
expectedResult: "1",
},
{
name: "mixed slot ranges",
nodeID: "node123",
clusterSlots: []redis.ClusterSlot{
{Start: 0, End: 499, Nodes: []redis.ClusterNode{{ID: "node123"}}},
{Start: 500, End: 999, Nodes: []redis.ClusterNode{{ID: "node123"}, {ID: "node999"}}},
{Start: 1000, End: 1499, Nodes: []redis.ClusterNode{{ID: "node999"}}},
{Start: 1500, End: 1999, Nodes: []redis.ClusterNode{{ID: "node123"}}},
},
expectedResult: "1500",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.TODO()
client, mock := redismock.NewClientMock()

if tt.clusterSlotsErr != nil {
mock.ExpectClusterSlots().SetErr(tt.clusterSlotsErr)
} else {
mock.ExpectClusterSlots().SetVal(tt.clusterSlots)
}

result := getRedisClusterSlots(ctx, client, logger, tt.nodeID)

assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name)

if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unmet expectations: %s", err)
}
})
}
}

func Test_getAttachedFollowerNodeIDs(t *testing.T) {
logger := logr.Discard()

tests := []struct {
name string
masterNodeID string
slaveNodeIDs []string
clusterSlavesErr error
expectedslaveNodeIDs []string
}{
{
name: "successful retrieval of slave nodes",
masterNodeID: "master123",
slaveNodeIDs: []string{"slave1", "slave2"},
expectedslaveNodeIDs: []string{"slave1", "slave2"},
},
{
name: "error fetching slave nodes",
masterNodeID: "master123",
clusterSlavesErr: redis.ErrClosed,
expectedslaveNodeIDs: nil,
},
{
name: "no attached slave nodes",
masterNodeID: "master456",
slaveNodeIDs: []string{},
expectedslaveNodeIDs: []string{},
},
{
name: "nil response for slave nodes",
masterNodeID: "masterNode123",
slaveNodeIDs: nil,
expectedslaveNodeIDs: nil,
clusterSlavesErr: nil,
},
{
name: "large number of attached slave nodes",
masterNodeID: "master123",
slaveNodeIDs: generateLargeListOfSlaves(1000), // Helper function needed
expectedslaveNodeIDs: generateLargeListOfSlaves(1000),
},
{
name: "invalid master node ID",
masterNodeID: "invalidMasterID",
slaveNodeIDs: nil,
expectedslaveNodeIDs: nil,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
client, mock := redismock.NewClientMock()

if tt.clusterSlavesErr != nil {
mock.ExpectClusterSlaves(tt.masterNodeID).SetErr(tt.clusterSlavesErr)
} else {
mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs)
}

result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID)

assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name)

if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unmet expectations: %s", err)
}
})
}
}

func generateLargeListOfSlaves(n int) []string {
var slaves []string
for i := 0; i < n; i++ {
slaves = append(slaves, fmt.Sprintf("slaveNode%d", i))
}
return slaves
}
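The pattern throughout these tests: redismock scripts each Redis reply up front, the code under test runs against the mocked *redis.Client, and ExpectationsWereMet fails the test if any scripted reply went unused. A minimal round trip, assuming the v9 mock package (github.com/go-redis/redismock/v9) used above:

```go
package k8sutils

import (
	"context"
	"testing"

	"github.com/go-redis/redismock/v9"
	"github.com/stretchr/testify/assert"
)

// Minimal redismock round trip: register the expectation, issue the
// command, then verify nothing scripted was left unconsumed.
func Test_redismockRoundTrip(t *testing.T) {
	client, mock := redismock.NewClientMock()
	mock.ExpectClusterSlaves("master123").SetVal([]string{"slave1"})

	ids, err := client.ClusterSlaves(context.Background(), "master123").Result()
	assert.NoError(t, err)
	assert.Equal(t, []string{"slave1"}, ids)
	assert.NoError(t, mock.ExpectationsWereMet())
}
```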
5 changes: 3 additions & 2 deletions k8sutils/redis-cluster.go
@@ -270,14 +270,15 @@ func (service RedisClusterSTS) CreateRedisClusterSetup(cr *redisv1beta2.RedisClu
annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
err := CreateOrUpdateStateFul(
- cr.Namespace,
+ cl,
+ logger,
+ cr.GetNamespace(),
objectMetaInfo,
generateRedisClusterParams(cr, service.getReplicaCount(cr), service.ExternalConfig, service),
redisClusterAsOwner(cr),
generateRedisClusterInitContainerParams(cr),
generateRedisClusterContainerParams(cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType),
cr.Spec.Sidecars,
- cl,
)
if err != nil {
logger.Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType)
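CreateOrUpdateStateFul gets the same treatment: client and logger move to the front, the namespace is read through the accessor, and the sidecar list becomes the final argument. A hypothetical signature inferred purely from the three call sites in this commit; the placeholder parameter types below are stand-ins, not the upstream definitions:

```go
package k8sutils

import (
	"github.com/go-logr/logr"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Placeholder types standing in for the operator's internal parameter
// structs, only so this sketch is self-contained.
type (
	statefulSetParameters   struct{}
	initContainerParameters struct{}
	containerParameters     struct{}
	sidecar                 struct{}
)

// CreateOrUpdateStateFul: hypothetical post-reorder signature; the
// argument order is the point here, and the body is elided.
func CreateOrUpdateStateFul(
	cl kubernetes.Interface,
	logger logr.Logger,
	namespace string,
	stsMeta metav1.ObjectMeta,
	params statefulSetParameters,
	ownerDef metav1.OwnerReference,
	initContainerParams initContainerParameters,
	containerParams containerParameters,
	sidecars *[]sidecar,
) error {
	logger.V(1).Info("Reconciling StatefulSet", "namespace", namespace, "name", stsMeta.Name)
	_ = cl // build the desired StatefulSet and create or patch it (elided)
	return nil
}
```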
12 changes: 7 additions & 5 deletions k8sutils/redis-replication.go
@@ -61,14 +61,16 @@ func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Int
labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels)
annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
- err := CreateOrUpdateStateFul(cr.Namespace,
+ err := CreateOrUpdateStateFul(
+ cl,
+ logger,
+ cr.GetNamespace(),
objectMetaInfo,
generateRedisReplicationParams(cr),
redisReplicationAsOwner(cr),
generateRedisReplicationInitContainerParams(cr),
generateRedisReplicationContainerParams(cr),
cr.Spec.Sidecars,
- cl,
)
if err != nil {
logger.Error(err, "Cannot create replication statefulset for Redis")
@@ -199,9 +201,9 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati
return initcontainerProp
}

- func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, ki kubernetes.Interface, di dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool {
+ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool {
// statefulset name the same as the redis replication name
- sts, err := GetStatefulSet(rs.Namespace, rs.Spec.RedisSentinelConfig.RedisReplicationName, ki)
+ sts, err := GetStatefulSet(client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName)
if err != nil {
return false
}
@@ -211,7 +213,7 @@ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, ki kuberne
// Enhanced check: When the pod is ready, it may not have been
// created as part of a replication cluster, so we should verify
// whether there is an actual master node.
- if master := getRedisReplicationMasterIP(ctx, ki, logger, rs, di); master == "" {
+ if master := getRedisReplicationMasterIP(ctx, client, logger, rs, dClient); master == "" {
return false
}
return true
15 changes: 12 additions & 3 deletions k8sutils/redis-sentinel.go
@@ -68,14 +68,15 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl
annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
err := CreateOrUpdateStateFul(
- cr.Namespace,
+ cl,
+ logger,
+ cr.GetNamespace(),
objectMetaInfo,
generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity),
redisSentinelAsOwner(cr),
generateRedisSentinelInitContainerParams(cr),
generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl),
cr.Spec.Sidecars,
- cl,
)
if err != nil {
logger.Error(err, "Cannot create Sentinel statefulset for Redis")
@@ -324,7 +325,15 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac
} else if len(masterPods) == 1 {
realMasterPod = masterPods[0]
} else {
- realMasterPod = checkAttachedSlave(ctx, client, logger, &replicationInstance, masterPods)
+ for _, podName := range masterPods {
+ redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName)
+ defer redisClient.Close()
+
+ if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 {
+ realMasterPod = podName
+ break
+ }
+ }
}

realMasterInfo := RedisDetails{
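checkAttachedSlave likewise now receives the per-pod Redis client, so the master-election loop above opens one client per candidate and keeps the first pod that reports at least one replica. A sketch of what the reworked helper plausibly does, assuming it reads connected_slaves from INFO replication; the parsing details and the -1 error sentinel are assumptions:

```go
package k8sutils

import (
	"context"
	"strconv"
	"strings"

	"github.com/go-logr/logr"
	redis "github.com/redis/go-redis/v9"
)

// checkAttachedSlave returns the connected_slaves count a pod reports in
// its INFO replication section, or -1 if the lookup fails.
func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) int {
	info, err := redisClient.Info(ctx, "replication").Result()
	if err != nil {
		logger.Error(err, "Failed to get INFO replication", "pod", podName)
		return -1
	}
	for _, line := range strings.Split(info, "\r\n") {
		if strings.HasPrefix(line, "connected_slaves:") {
			count, err := strconv.Atoi(strings.TrimPrefix(line, "connected_slaves:"))
			if err != nil {
				logger.Error(err, "Failed to parse connected_slaves", "pod", podName)
				return -1
			}
			return count
		}
	}
	return 0
}
```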