Skip to content

Commit

Permalink
fix: Add client for testing (OT-CONTAINER-KIT#700)
Browse files Browse the repository at this point in the history
* mock client

Signed-off-by: Shubham Gupta <[email protected]>

* feat: add client

Signed-off-by: Shubham Gupta <[email protected]>

* Add finalizers

Signed-off-by: Shubham Gupta <[email protected]>

* fix

Signed-off-by: Shubham Gupta <[email protected]>

* Add Test

Signed-off-by: Shubham Gupta <[email protected]>

* fix

Signed-off-by: Shubham Gupta <[email protected]>

* fix

Signed-off-by: Shubham Gupta <[email protected]>

---------

Signed-off-by: Shubham Gupta <[email protected]>
Signed-off-by: Matt Robinson <[email protected]>
  • Loading branch information
shubham-cmyk authored and mattrobinsonsre committed Jul 11, 2024
1 parent 6745d79 commit f7d2c5e
Show file tree
Hide file tree
Showing 6 changed files with 238 additions and 47 deletions.
10 changes: 7 additions & 3 deletions controllers/rediscluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,15 +27,19 @@ import (
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// RedisClusterReconciler reconciles a RedisCluster object
type RedisClusterReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
K8sClient kubernetes.Interface
Dk8sClinet dynamic.Interface
Log logr.Logger
Scheme *runtime.Scheme
}

// Reconcile is part of the main kubernetes reconciliation loop
Expand All @@ -61,7 +65,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
followerReplicas := instance.Spec.GetReplicaCounts("follower")
totalReplicas := leaderReplicas + followerReplicas

if err = k8sutils.HandleRedisClusterFinalizer(instance, r.Client); err != nil {
if err = k8sutils.HandleRedisClusterFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil {
return ctrl.Result{RequeueAfter: time.Second * 60}, err
}

Expand Down
10 changes: 7 additions & 3 deletions controllers/redisreplication_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,19 @@ import (
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// RedisReplicationReconciler reconciles a RedisReplication object
type RedisReplicationReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
K8sClient kubernetes.Interface
Dk8sClinet dynamic.Interface
Log logr.Logger
Scheme *runtime.Scheme
}

func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
Expand All @@ -44,7 +48,7 @@ func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Req
followerReplicas := instance.Spec.GetReplicationCounts("replication") - leaderReplicas
totalReplicas := leaderReplicas + followerReplicas

if err = k8sutils.HandleRedisReplicationFinalizer(instance, r.Client); err != nil {
if err = k8sutils.HandleRedisReplicationFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil {
return ctrl.Result{}, err
}

Expand Down
8 changes: 6 additions & 2 deletions controllers/redissentinel_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,19 @@ import (
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// RedisSentinelReconciler reconciles a RedisSentinel object
type RedisSentinelReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
K8sClient kubernetes.Interface
Dk8sClinet dynamic.Interface
Log logr.Logger
Scheme *runtime.Scheme
}

// Reconcile is part of the main kubernetes reconciliation loop which aims
Expand Down
41 changes: 12 additions & 29 deletions k8sutils/finalizer.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,8 @@ package k8sutils
import (
"context"
"fmt"
"strconv"

redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
mockLog "github.com/OT-CONTAINER-KIT/redis-operator/mocks/log"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -46,15 +44,14 @@ func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interfa
}

// HandleRedisClusterFinalizer finalize resource if instance is marked to be deleted
func HandleRedisClusterFinalizer(cr *redisv1beta2.RedisCluster, cl client.Client) error {
logger := finalizerLogger(cr.Namespace, RedisClusterFinalizer)
func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error {
if cr.GetDeletionTimestamp() != nil {
if controllerutil.ContainsFinalizer(cr, RedisClusterFinalizer) {
if err := finalizeRedisClusterPVC(cr); err != nil {
if err := finalizeRedisClusterPVC(k8sClient, logger, cr); err != nil {
return err
}
controllerutil.RemoveFinalizer(cr, RedisClusterFinalizer)
if err := cl.Update(context.TODO(), cr); err != nil {
if err := ctrlclient.Update(context.TODO(), cr); err != nil {
logger.Error(err, "Could not remove finalizer "+RedisClusterFinalizer)
return err
}
Expand All @@ -64,15 +61,14 @@ func HandleRedisClusterFinalizer(cr *redisv1beta2.RedisCluster, cl client.Client
}

// Handle RedisReplicationFinalizer finalize resource if instance is marked to be deleted
func HandleRedisReplicationFinalizer(cr *redisv1beta2.RedisReplication, cl client.Client) error {
logger := finalizerLogger(cr.Namespace, RedisReplicationFinalizer)
func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error {
if cr.GetDeletionTimestamp() != nil {
if controllerutil.ContainsFinalizer(cr, RedisReplicationFinalizer) {
if err := finalizeRedisReplicationPVC(cr); err != nil {
if err := finalizeRedisReplicationPVC(k8sClient, logger, cr); err != nil {
return err
}
controllerutil.RemoveFinalizer(cr, RedisReplicationFinalizer)
if err := cl.Update(context.TODO(), cr); err != nil {
if err := ctrlclient.Update(context.TODO(), cr); err != nil {
logger.Error(err, "Could not remove finalizer "+RedisReplicationFinalizer)
return err
}
Expand Down Expand Up @@ -136,7 +132,7 @@ func AddRedisSentinelFinalizer(cr *redisv1beta2.RedisSentinel, cl client.Client)
}

// finalizeRedisPVC delete PVC
func finalizeRedisPVC(client kubernetes.Interface, logger mockLog.LoggerInterface, cr *redisv1beta2.Redis) error {
func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error {
PVCName := fmt.Sprintf("%s-%s-0", cr.Name, cr.Name)
err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
Expand All @@ -147,16 +143,10 @@ func finalizeRedisPVC(client kubernetes.Interface, logger mockLog.LoggerInterfac
}

// finalizeRedisClusterPVC delete PVCs
func finalizeRedisClusterPVC(cr *redisv1beta2.RedisCluster) error {
logger := finalizerLogger(cr.Namespace, RedisClusterFinalizer)
client, err := GenerateK8sClient(GenerateK8sConfig)
if err != nil {
logger.Error(err, "Could not generate kubernetes client")
return err
}
func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error {
for _, role := range []string{"leader", "follower"} {
for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ {
PVCName := cr.Name + "-" + cr.Name + "-" + role + "-" + strconv.Itoa(i)
PVCName := fmt.Sprintf("%s-%s-%s-%d", cr.Name, cr.Name, role, i)
err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName)
Expand All @@ -165,7 +155,7 @@ func finalizeRedisClusterPVC(cr *redisv1beta2.RedisCluster) error {
}
if cr.Spec.Storage.NodeConfVolume {
for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ {
PVCName := "node-conf" + cr.Name + "-" + role + "-" + strconv.Itoa(i)
PVCName := fmt.Sprintf("%s-%s-%s-%d", "node-conf", cr.Name, role, i)
err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName)
Expand All @@ -179,22 +169,15 @@ func finalizeRedisClusterPVC(cr *redisv1beta2.RedisCluster) error {
}

// finalizeRedisReplicationPVC delete PVCs
func finalizeRedisReplicationPVC(cr *redisv1beta2.RedisReplication) error {
logger := finalizerLogger(cr.Namespace, RedisReplicationFinalizer)
client, err := GenerateK8sClient(GenerateK8sConfig)
if err != nil {
logger.Error(err, "Could not generate kubernetes client")
return err
}
func finalizeRedisReplicationPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error {
for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ {
PVCName := cr.Name + "-" + cr.Name + "-" + strconv.Itoa(i)
PVCName := fmt.Sprintf("%s-%s-%d", cr.Name, cr.Name, i)
err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName)
return err
}
}

return nil
}

Expand Down
191 changes: 191 additions & 0 deletions k8sutils/finalizers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
k8sClientFake "k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/pointer"
)

// func TestHandleRedisFinalizer(t *testing.T) {
Expand Down Expand Up @@ -120,3 +122,192 @@ func TestFinalizeRedisPVC(t *testing.T) {
})
}
}

// TestFinalizeRedisReplicationPVC exercises finalizeRedisReplicationPVC: every
// PVC named after a replica of the RedisReplication must be deleted, and a PVC
// that is already absent must not surface an error.
func TestFinalizeRedisReplicationPVC(t *testing.T) {
	// replicationPVC builds the PVC a replication pod with the given ordinal
	// would own, mirroring the "<name>-<name>-<ordinal>" naming scheme the
	// finalizer targets.
	replicationPVC := func(ordinal int) *corev1.PersistentVolumeClaim {
		return &corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("redis-replication-redis-replication-%d", ordinal),
				Namespace: "redis",
			},
		}
	}

	// Both cases act on the same three-replica RedisReplication fixture.
	replication := &v1beta2.RedisReplication{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "redis-replication",
			Namespace: "redis",
		},
		Spec: v1beta2.RedisReplicationSpec{
			Size: pointer.Int32(3),
		},
	}

	cases := []struct {
		name    string
		pvcs    []*corev1.PersistentVolumeClaim
		wantErr bool
	}{
		{
			name: "Successful deletion of Redis Replication PVCs",
			pvcs: []*corev1.PersistentVolumeClaim{
				replicationPVC(0),
				replicationPVC(1),
				replicationPVC(2),
			},
		},
		{
			name: "PVC does not exist and no error should be returned",
			pvcs: nil,
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			logger := testr.New(t)
			// helperToRuntimeObjects(nil) yields an empty slice, so a single
			// constructor call covers both the seeded and the empty cluster.
			k8sClient := k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(c.pvcs)...)

			err := finalizeRedisReplicationPVC(k8sClient, logger, replication)
			if c.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)

			// Every seeded PVC must now be gone from the fake cluster.
			for _, pvc := range c.pvcs {
				_, getErr := k8sClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
				assert.True(t, k8serrors.IsNotFound(getErr))
			}
		})
	}
}

func TestFinalizeRedisClusterPVC(t *testing.T) {
tests := []struct {
name string
existingPVCs []*corev1.PersistentVolumeClaim
redisCluster *v1beta2.RedisCluster
expectError bool
}{
{
name: "Successful deletion of Redis Cluster PVCs",
redisCluster: &v1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Namespace: "redis",
},
Spec: v1beta2.RedisClusterSpec{
Size: pointer.Int32(3),
Storage: &v1beta2.ClusterStorage{
NodeConfVolume: true,
},
},
},
existingPVCs: helperRedisClusterPVCs("redis-cluster", "redis"),
expectError: false,
},
{
name: "PVC does not exist and no error should be returned",
existingPVCs: nil,
redisCluster: &v1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Namespace: "redis",
},
Spec: v1beta2.RedisClusterSpec{
Size: pointer.Int32(3),
Storage: &v1beta2.ClusterStorage{
NodeConfVolume: false,
},
},
},
expectError: false,
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
logger := testr.New(t)
var k8sClient *k8sClientFake.Clientset
if tc.existingPVCs != nil {
k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...)
} else {
k8sClient = k8sClientFake.NewSimpleClientset()
}

err := finalizeRedisClusterPVC(k8sClient, logger, tc.redisCluster)
if tc.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}

// Verify PVCs are deleted
if !tc.expectError {
for _, pvc := range tc.existingPVCs {
_, err := k8sClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
assert.True(t, k8serrors.IsNotFound(err))
}
}
})
}
}

// helperToRuntimeObjects converts a slice of PVC pointers into the
// []runtime.Object form accepted by the fake clientset constructor. Each PVC
// is deep-copied so the fake client cannot mutate the caller's fixtures.
func helperToRuntimeObjects(pvcs []*corev1.PersistentVolumeClaim) []runtime.Object {
	objs := make([]runtime.Object, 0, len(pvcs))
	for _, pvc := range pvcs {
		objs = append(objs, pvc.DeepCopyObject())
	}
	return objs
}

// helperRedisClusterPVCs fabricates the full PVC set a three-replica Redis
// cluster would own: for each of the leader and follower roles, one data PVC
// ("<name>-<name>-<role>-<i>") and one node-conf PVC
// ("node-conf-<name>-<role>-<i>") per replica, in that order.
func helperRedisClusterPVCs(clusterName string, namespace string) []*corev1.PersistentVolumeClaim {
	// newPVC builds a minimal PVC with the given name in the test namespace.
	newPVC := func(name string) *corev1.PersistentVolumeClaim {
		return &corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
		}
	}

	// 2 roles x 3 replicas x 2 PVCs each.
	pvcs := make([]*corev1.PersistentVolumeClaim, 0, 12)
	for _, role := range []string{"leader", "follower"} {
		for i := 0; i < 3; i++ {
			pvcs = append(pvcs,
				newPVC(fmt.Sprintf("%s-%s-%s-%d", clusterName, clusterName, role, i)),
				newPVC(fmt.Sprintf("node-conf-%s-%s-%d", clusterName, role, i)),
			)
		}
	}
	return pvcs
}
Loading

0 comments on commit f7d2c5e

Please sign in to comment.