Commit 77157af
h3poteto committed on Jul 4, 2024 (1 parent: 0959b3d)
Showing 3 changed files with 112 additions and 18 deletions.

diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go
index 2f42448..9aa3e12 100644
--- a/e2e/e2e_test.go
+++ b/e2e/e2e_test.go
@@ -12,8 +12,10 @@ import (
 	optimizerv1alpha1 "github.com/oviceinc/rollout-optimizer-controller/api/v1alpha1"
 	"github.com/oviceinc/rollout-optimizer-controller/e2e/pkg/fixtures"
 	"github.com/oviceinc/rollout-optimizer-controller/e2e/pkg/util"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
@@ -26,6 +28,12 @@ var (
 	cfg       *rest.Config
 	k8sClient client.Client
 	scheme    = runtime.NewScheme()
+	namespace = "default"
+	rollout   = "example"
+	scaledown = "example-scaledown"
+	perOnce   = 1
+	coolTime  = 120
+	replicas  = 4
 )

 var _ = BeforeSuite(func() {
@@ -99,15 +107,15 @@ var _ = Describe("E2E", func() {
 		}

 		// Apply Rollout
-		if err := fixtures.ApplyRollout(ctx, k8sClient, "default", "example"); err != nil {
+		if err := fixtures.ApplyRollout(ctx, k8sClient, namespace, rollout, "nginx:1.25.0", replicas); err != nil {
 			klog.Error(err)
 		}
 		// Apply RolloutScaleDown
-		if err := fixtures.ApplyRolloutScaledown(ctx, k8sClient, "default", "example-scaledown", "example"); err != nil {
+		if err := fixtures.ApplyRolloutScaledown(ctx, k8sClient, namespace, scaledown, rollout, perOnce, coolTime); err != nil {
 			klog.Error(err)
 		}
 		// Apply manager
-		if err := fixtures.ApplyManager(ctx, k8sClient, "default", "manager", os.Getenv("MANAGER_IMAGE")); err != nil {
+		if err := fixtures.ApplyManager(ctx, k8sClient, namespace, "manager", os.Getenv("MANAGER_IMAGE")); err != nil {
 			klog.Error(err)
 		}
 	})
@@ -115,21 +123,93 @@ var _ = Describe("E2E", func() {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 		defer cancel()

-		if err := fixtures.DeleteManager(ctx, k8sClient, "default", "manager"); err != nil {
+		if err := fixtures.DeleteManager(ctx, k8sClient, namespace, "manager"); err != nil {
 			klog.Error(err)
 		}

-		if err := fixtures.DeleteRolloutScaledown(ctx, k8sClient, "default", "example-scaledown"); err != nil {
+		if err := fixtures.DeleteRolloutScaledown(ctx, k8sClient, namespace, scaledown); err != nil {
 			klog.Error(err)
 		}

-		if err := fixtures.DeleteRollout(ctx, k8sClient, "default", "example"); err != nil {
+		if err := fixtures.DeleteRollout(ctx, k8sClient, namespace, rollout); err != nil {
 			klog.Error(err)
 		}
 	})
 	It("E2E", func() {
-		// The old pods must not all be terminated within 5 minutes
-		// All old pods must be terminated within 10 minutes
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+		defer cancel()

+		r := &argorolloutsapiv1alpha1.Rollout{}
+		err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: rollout, Namespace: namespace}, r)
+			if err != nil {
+				return false, err
+			}
+			if r.Status.Phase == argorolloutsapiv1alpha1.RolloutPhaseHealthy {
+				return true, nil
+			}
+			return false, nil
+		})
+		Expect(err).ShouldNot(HaveOccurred())
+
+		oldHash := r.Status.StableRS
+		oldRS := &appsv1.ReplicaSet{}
+		err = wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: rollout + "-" + oldHash, Namespace: namespace}, oldRS)
+			if err != nil {
+				return false, err
+			}
+			if oldRS.Status.Replicas == oldRS.Status.ReadyReplicas {
+				return true, nil
+			}
+			return false, nil
+		})
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(oldRS.Status.Replicas).Should(Equal(int32(replicas)))
+
+		// Update Rollout
+		err = fixtures.UpdateRollout(ctx, k8sClient, namespace, rollout, "nginx:latest")
+		Expect(err).ShouldNot(HaveOccurred())
+
+		// Wait until rollout is healthy
+		err = wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: rollout, Namespace: namespace}, r)
+			if err != nil {
+				return false, err
+			}
+			if r.Status.Phase == argorolloutsapiv1alpha1.RolloutPhaseHealthy {
+				return true, nil
+			}
+			return false, nil
+		})
+		Expect(err).ShouldNot(HaveOccurred())
+
+		time.Sleep(10 * time.Second)
+
+		// The old replica set should be scaled down
+		err = k8sClient.Get(ctx, types.NamespacedName{Name: oldRS.Name, Namespace: namespace}, oldRS)
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(oldRS.Status.Replicas).Should(Equal(int32(replicas - perOnce)))
+
+		// Wait 120 seconds
+		time.Sleep(120 * time.Second)
+		// The old replica set should be scaled down
+		err = k8sClient.Get(ctx, types.NamespacedName{Name: oldRS.Name, Namespace: namespace}, oldRS)
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(oldRS.Status.Replicas).Should(Equal(int32(replicas - perOnce*2)))
+
+		// Wait 120 seconds
+		time.Sleep(120 * time.Second)
+		// The old replica set should be scaled down
+		err = k8sClient.Get(ctx, types.NamespacedName{Name: oldRS.Name, Namespace: namespace}, oldRS)
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(oldRS.Status.Replicas).Should(Equal(int32(replicas - perOnce*3)))
+
+		// Wait 120 seconds
+		time.Sleep(120 * time.Second)
+		// The old replica set should be scaled down
+		err = k8sClient.Get(ctx, types.NamespacedName{Name: oldRS.Name, Namespace: namespace}, oldRS)
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(oldRS.Status.Replicas).Should(Equal(int32(replicas - perOnce*4)))
 	})
 })
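
Note: the test above polls for RolloutPhaseHealthy with the same wait.Poll closure in two places. A small helper along the following lines would capture that pattern. This is a sketch, not part of the commit — the helper name and placement are assumptions; it only reuses imports already present in e2e_test.go (context, time, types, wait, client, argorolloutsapiv1alpha1).

// waitForRolloutPhase polls until the named Rollout reports the given phase,
// returning the last-observed object. Sketch only; not part of this commit.
func waitForRolloutPhase(ctx context.Context, c client.Client, ns, name string, phase argorolloutsapiv1alpha1.RolloutPhase) (*argorolloutsapiv1alpha1.Rollout, error) {
	r := &argorolloutsapiv1alpha1.Rollout{}
	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
		if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, r); err != nil {
			return false, err
		}
		return r.Status.Phase == phase, nil
	})
	return r, err
}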
diff --git a/e2e/pkg/fixtures/rollout.go b/e2e/pkg/fixtures/rollout.go
index 7f0db5a..daf1d49 100644
--- a/e2e/pkg/fixtures/rollout.go
+++ b/e2e/pkg/fixtures/rollout.go
@@ -16,12 +16,12 @@ var (
 	previewService = "preview"
 )

-func ApplyRollout(ctx context.Context, k8sClient client.Client, ns, name string) error {
-	if err := service(ctx, k8sClient, ns, activeService); err != nil {
+func ApplyRollout(ctx context.Context, k8sClient client.Client, ns, name, image string, replicas int) error {
+	if err := service(ctx, k8sClient, ns, activeService, name); err != nil {
 		return err
 	}

-	if err := service(ctx, k8sClient, ns, previewService); err != nil {
+	if err := service(ctx, k8sClient, ns, previewService, name); err != nil {
 		return err
 	}

@@ -29,9 +29,12 @@ func ApplyRollout(ctx context.Context, k8sClient client.Client, ns, name string)
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: ns,
+			Labels: map[string]string{
+				"app": name,
+			},
 		},
 		Spec: argorolloutsapiv1alpha1.RolloutSpec{
-			Replicas: utilpointer.Int32(2),
+			Replicas: utilpointer.Int32(int32(replicas)),
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{
 					"app": name,
@@ -47,7 +50,7 @@ func ApplyRollout(ctx context.Context, k8sClient client.Client, ns, name string)
 					Containers: []corev1.Container{
 						{
 							Name:  name,
-							Image: "nginx:1.25",
+							Image: image,
 							Ports: []corev1.ContainerPort{
 								{
 									ContainerPort: 80,
@@ -71,6 +74,17 @@ func ApplyRollout(ctx context.Context, k8sClient client.Client, ns, name string)
 	return k8sClient.Create(ctx, rollout)
 }

+func UpdateRollout(ctx context.Context, k8sClient client.Client, ns, name, image string) error {
+	found := &argorolloutsapiv1alpha1.Rollout{}
+	err := k8sClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: name}, found)
+	if err != nil {
+		return err
+	}
+
+	found.Spec.Template.Spec.Containers[0].Image = image
+	return k8sClient.Update(ctx, found)
+}
+
 func DeleteRollout(ctx context.Context, k8sClient client.Client, ns, name string) error {
 	if err := k8sClient.Delete(ctx, &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -99,7 +113,7 @@ func DeleteRollout(ctx context.Context, k8sClient client.Client, ns, name string
 	return nil
 }

-func service(ctx context.Context, k8sClient client.Client, ns, name string) error {
+func service(ctx context.Context, k8sClient client.Client, ns, name, rollout string) error {
 	svc := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -107,7 +121,7 @@ func service(ctx context.Context, k8sClient client.Client, ns, name string) erro
 		},
 		Spec: corev1.ServiceSpec{
 			Selector: map[string]string{
-				"app": name,
+				"app": rollout,
 			},
 			Ports: []corev1.ServicePort{
 				{
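
Note: before this change, service() set the selector to the Service's own name ("active"/"preview"), which matched no pods, since the pod template is labeled app: <rollout name>. With the rollout name passed through, each Service now resolves to the Rollout's pods roughly as below (a sketch with the test's values; the port is illustrative since the diff truncates it, and during a blue-green update Argo Rollouts additionally injects its rollouts-pod-template-hash selector to split active from preview traffic).

// Equivalent of what the fixture now creates (illustrative values).
svc := &corev1.Service{
	ObjectMeta: metav1.ObjectMeta{Name: "active", Namespace: "default"},
	Spec: corev1.ServiceSpec{
		// Selects the Rollout's pods via the shared "app" label,
		// rather than the Service's own name as before.
		Selector: map[string]string{"app": "example"},
		Ports:    []corev1.ServicePort{{Port: 80}},
	},
}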
diff --git a/e2e/pkg/fixtures/rollout_scaledown.go b/e2e/pkg/fixtures/rollout_scaledown.go
index 15a6e37..5c48b54 100644
--- a/e2e/pkg/fixtures/rollout_scaledown.go
+++ b/e2e/pkg/fixtures/rollout_scaledown.go
@@ -8,7 +8,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

-func ApplyRolloutScaledown(ctx context.Context, k8sClient client.Client, ns, name, rollout string) error {
+func ApplyRolloutScaledown(ctx context.Context, k8sClient client.Client, ns, name, rollout string, perOnce, coolTime int) error {
 	scaledown := &optimizerv1alpha1.RolloutScaleDown{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -16,8 +16,8 @@ func ApplyRolloutScaledown(ctx context.Context, k8sClient client.Client, ns, nam
 		},
 		Spec: optimizerv1alpha1.RolloutScaleDownSpec{
 			TargetRollout:    rollout,
-			TerminatePerOnce: 1,
-			CoolTimeSeconds:  300,
+			TerminatePerOnce: perOnce,
+			CoolTimeSeconds:  coolTime,
 		},
 	}
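
Note: with the values the test now injects (replicas = 4, perOnce = 1, coolTime = 120), the expected behavior matches the placeholder comments this commit replaces: the old ReplicaSet loses one pod shortly after the new revision turns healthy, then one more per cool-down period, so it is not empty at 5 minutes but empties well inside the 10-minute timeout. A back-of-the-envelope check (sketch, not part of the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	replicas, perOnce, coolTime := 4, 1, 120 // values used by the e2e test
	steps := replicas / perOnce              // 4 scale-down steps of one pod each
	// The first step follows the rollout turning healthy; each later step
	// waits coolTime seconds, so the old ReplicaSet empties after about:
	remaining := time.Duration(steps-1) * time.Duration(coolTime) * time.Second
	fmt.Println(remaining) // 6m0s — past 5 minutes, inside the 10-minute budget
}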