From a44f4928a12bcff0d27ee8f3ba6417d22999c192 Mon Sep 17 00:00:00 2001 From: kerthcet Date: Thu, 4 Apr 2024 00:09:02 +0800 Subject: [PATCH 1/3] Add updatedReplicas to status Signed-off-by: kerthcet --- .../v1/leaderworkerset_types.go | 3 + ...erworkerset.x-k8s.io_leaderworkersets.yaml | 4 ++ pkg/controllers/leaderworkerset_controller.go | 19 +++-- .../controllers/leaderworkerset_test.go | 72 ++++++++++--------- test/testutils/util.go | 17 +++-- test/testutils/validators.go | 16 +++++ 6 files changed, 85 insertions(+), 46 deletions(-) diff --git a/api/leaderworkerset/v1/leaderworkerset_types.go b/api/leaderworkerset/v1/leaderworkerset_types.go index 09c61602..c6ebcdbb 100644 --- a/api/leaderworkerset/v1/leaderworkerset_types.go +++ b/api/leaderworkerset/v1/leaderworkerset_types.go @@ -187,6 +187,9 @@ type LeaderWorkerSetStatus struct { // ReadyReplicas track the number of groups that are in ready state. ReadyReplicas int `json:"readyReplicas,omitempty"` + // UpdatedReplicas track the number of groups that have been updated. + UpdatedReplicas int `json:"updatedReplicas,omitempty"` + // Replicas track the active total number of groups. Replicas int `json:"replicas,omitempty"` diff --git a/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml b/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml index 598662e3..c6855ed0 100644 --- a/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml +++ b/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml @@ -15407,6 +15407,10 @@ spec: replicas: description: Replicas track the active total number of groups. type: integer + updatedReplicas: + description: UpdatedReplicas track the number of groups that have + been updated. + type: integer type: object type: object served: true diff --git a/pkg/controllers/leaderworkerset_controller.go b/pkg/controllers/leaderworkerset_controller.go index 68b96368..bf01777d 100644 --- a/pkg/controllers/leaderworkerset_controller.go +++ b/pkg/controllers/leaderworkerset_controller.go @@ -346,6 +346,7 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l updateStatus := false readyCount := 0 + updatedCount := 0 templateHash := utils.LeaderWorkerTemplateHash(lws) // Iterate through all statefulsets. @@ -354,8 +355,10 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l continue } + var replicaReady bool // this is the worker statefulset. - if sts.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash && statefulsetutils.StatefulsetReady(sts) { + if statefulsetutils.StatefulsetReady(sts) { + // the worker pods are OK. // need to check leader pod for this group. var leaderPod corev1.Pod @@ -363,10 +366,13 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l log.Error(err, "Fetching leader pod") return false, err } - if leaderPod.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash && podutils.PodRunningAndReady(leaderPod) { - // set to progressing. 
+ if podutils.PodRunningAndReady(leaderPod) { + replicaReady = true readyCount++ } + if replicaReady && sts.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash && leaderPod.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash { + updatedCount++ + } } } @@ -375,7 +381,12 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l updateStatus = true } - condition := makeCondition(readyCount == int(*lws.Spec.Replicas)) + if lws.Status.UpdatedReplicas != updatedCount { + lws.Status.UpdatedReplicas = updatedCount + updateStatus = true + } + + condition := makeCondition(updatedCount == int(*lws.Spec.Replicas)) updateCondition := setCondition(lws, condition) // if condition changed, record events if updateCondition { diff --git a/test/integration/controllers/leaderworkerset_test.go b/test/integration/controllers/leaderworkerset_test.go index 608a3d47..d0deed74 100644 --- a/test/integration/controllers/leaderworkerset_test.go +++ b/test/integration/controllers/leaderworkerset_test.go @@ -511,45 +511,43 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, { // Check the rolling update initial state. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var leaderworkerset leaderworkerset.LeaderWorkerSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderworkerset)).To(gomega.Succeed()) - testing.UpdateLeaderTemplate(ctx, k8sClient, &leaderworkerset) + testing.UpdateLeaderTemplate(ctx, k8sClient, lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 3) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0) }, }, { // Rolling update 1 replica. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-3", Namespace: lws.Namespace}, &sts)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-3", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 1) }, }, { // Update the 1-index replica will not change the partition. 
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-1", Namespace: lws.Namespace}, &sts)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-1", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 2) }, }, { @@ -563,6 +561,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 3, 1) }, }, { @@ -575,6 +574,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, }, @@ -594,61 +594,57 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, { // Update the worker template. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var leaderworkerset leaderworkerset.LeaderWorkerSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderworkerset)).To(gomega.Succeed()) - testing.UpdateWorkerTemplate(ctx, k8sClient, &leaderworkerset) + testing.UpdateWorkerTemplate(ctx, k8sClient, lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0) }, }, { // Rolling update index-3 replica. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-3", Namespace: lws.Namespace}, &sts)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-3", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 1) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 1) }, }, { // Rolling update index-2 replicas. 
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-2", Namespace: lws.Namespace}, &sts)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-2", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 2) }, }, { // Rolling update the rest 2 replicas. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts1, sts2 appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-1", Namespace: lws.Namespace}, &sts1)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts1, lws) - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-0", Namespace: lws.Namespace}, &sts2)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts2, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-1", lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-0", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, }, @@ -668,6 +664,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, { @@ -676,7 +673,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { var leaderworkerset leaderworkerset.LeaderWorkerSet gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderworkerset)).To(gomega.Succeed()) leaderworkerset.Spec.Replicas = ptr.To[int32](6) - leaderworkerset.Spec.LeaderWorkerTemplate.WorkerTemplate.Spec.Containers[0].Image = "nginx:1.16.1" + leaderworkerset.Spec.LeaderWorkerTemplate.WorkerTemplate.Spec.Containers[0].Name = "new-worker" gomega.Expect(k8sClient.Update(ctx, &leaderworkerset)).To(gomega.Succeed()) var leaderSts appsv1.StatefulSet @@ -689,6 +686,8 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") // When scaling up the Replicas, Partition will not change, so the new created Pods will apply with the new template. testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 4) + // We haven't set the replica-4, replica-5 to ready, so the readyReplicas is 4, the updatedReplicas is 0. 
+ testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0) }, }, { @@ -701,6 +700,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 6, 6) }, }, }, @@ -720,37 +720,31 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4) }, }, { // Update the worker template. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - gomega.Eventually(func() error { - var leaderworkerset leaderworkerset.LeaderWorkerSet - if err := k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderworkerset); err != nil { - return err - } - leaderworkerset.Spec.LeaderWorkerTemplate.WorkerTemplate.Spec.Containers[0].Image = "nginx:1.16.1" - return k8sClient.Update(ctx, &leaderworkerset) - }, testing.Timeout, testing.Interval).Should(gomega.Succeed()) + testing.UpdateWorkerTemplate(ctx, k8sClient, lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 3) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0) }, }, { // Rolling update index-3 replica. lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) { - var sts appsv1.StatefulSet - gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name + "-3", Namespace: lws.Namespace}, &sts)).To(gomega.Succeed()) - testing.SetPodGroupToReady(ctx, k8sClient, &sts, lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-3", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 1) }, }, { @@ -770,11 +764,14 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: leaderworkerset.Name, Namespace: leaderworkerset.Namespace}, &leaderSts)).To(gomega.Succeed()) // Manually create leader pods here because we have no statefulset controller. 
gomega.Expect(testing.CreateLeaderPods(ctx, leaderSts, k8sClient, lws, 4, 6)).To(gomega.Succeed()) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-4", lws) + testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-5", lws) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 6, 3) }, }, { @@ -787,6 +784,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 6, 6) }, }, }, @@ -806,6 +804,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 6, 6) }, }, { @@ -824,6 +823,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 5) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 6, 0) }, }, { @@ -845,6 +845,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready") testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2) + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 3, 0) }, }, { @@ -857,6 +858,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0) testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready") + testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 3, 3) }, }, }, diff --git a/test/testutils/util.go b/test/testutils/util.go index 432180eb..cb982eaa 100644 --- a/test/testutils/util.go +++ b/test/testutils/util.go @@ -147,20 +147,23 @@ func SetPodGroupsToReady(ctx context.Context, k8sClient client.Client, lws *lead for i, sts := range stsList.Items { if sts.Name != lws.Name { - SetPodGroupToReady(ctx, k8sClient, &stsList.Items[i], lws) + SetPodGroupToReady(ctx, k8sClient, stsList.Items[i].Name, lws) } } } // SetPodGroupToReady set one podGroup(leaderPod+workerStatefulset) of leaderWorkerSet to ready state, workerPods not included. 
-func SetPodGroupToReady(ctx context.Context, k8sClient client.Client, statefulset *appsv1.StatefulSet, lws *leaderworkerset.LeaderWorkerSet) { - hash := utils.LeaderWorkerTemplateHash(lws) - +func SetPodGroupToReady(ctx context.Context, k8sClient client.Client, statefulsetName string, lws *leaderworkerset.LeaderWorkerSet) { gomega.Eventually(func() error { var leaderPod corev1.Pod - if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: statefulset.Namespace, Name: statefulset.Name}, &leaderPod); err != nil { + if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: lws.Namespace, Name: statefulsetName}, &leaderPod); err != nil { + return err + } + + if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: lws.Namespace, Name: lws.Name}, lws); err != nil { return err } + hash := utils.LeaderWorkerTemplateHash(lws) leaderPod.Labels[leaderworkerset.TemplateRevisionHashKey] = hash return k8sClient.Update(ctx, &leaderPod) @@ -168,7 +171,7 @@ func SetPodGroupToReady(ctx context.Context, k8sClient client.Client, statefulse gomega.Eventually(func() error { var leaderPod corev1.Pod - if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: statefulset.Namespace, Name: statefulset.Name}, &leaderPod); err != nil { + if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: lws.Namespace, Name: statefulsetName}, &leaderPod); err != nil { return err } @@ -183,7 +186,7 @@ func SetPodGroupToReady(ctx context.Context, k8sClient client.Client, statefulse gomega.Eventually(func() error { var sts appsv1.StatefulSet - if err := k8sClient.Get(ctx, types.NamespacedName{Name: statefulset.Name, Namespace: statefulset.Namespace}, &sts); err != nil { + if err := k8sClient.Get(ctx, types.NamespacedName{Name: statefulsetName, Namespace: lws.Namespace}, &sts); err != nil { return err } diff --git a/test/testutils/validators.go b/test/testutils/validators.go index 5ff2c1d4..542f4ffe 100644 --- a/test/testutils/validators.go +++ b/test/testutils/validators.go @@ -322,6 +322,22 @@ func ExpectLeaderWorkerSetNotProgressing(ctx context.Context, k8sClient client.C gomega.Eventually(CheckLeaderWorkerSetHasCondition, Timeout, Interval).WithArguments(ctx, k8sClient, lws, condition).Should(gomega.Equal(true)) } +func ExpectLeaderWorkerSetStatusReplicas(ctx context.Context, k8sClient client.Client, lws *leaderworkerset.LeaderWorkerSet, readyReplicas, updatedReplicas int) { + ginkgo.By("checking leaderworkerset status replicas") + gomega.Eventually(func() error { + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: lws.Namespace, Name: lws.Name}, lws); err != nil { + return err + } + if lws.Status.ReadyReplicas != readyReplicas { + return fmt.Errorf("readyReplicas in status not match, want: %d, got %d", readyReplicas, lws.Status.ReadyReplicas) + } + if lws.Status.UpdatedReplicas != updatedReplicas { + return fmt.Errorf("updatedReplicas in status not match, want: %d, got %d", updatedReplicas, lws.Status.UpdatedReplicas) + } + return nil + }, Timeout, Interval).Should(gomega.Succeed()) +} + func ExpectLeaderWorkerSetAvailable(ctx context.Context, k8sClient client.Client, lws *leaderworkerset.LeaderWorkerSet, message string) { ginkgo.By(fmt.Sprintf("checking leaderworkerset status(%s) is true", leaderworkerset.LeaderWorkerSetAvailable)) condition := metav1.Condition{ From ccc1223c1561853b36574f1547fff8764103e030 Mon Sep 17 00:00:00 2001 From: kerthcet Date: Thu, 4 Apr 2024 00:19:46 +0800 Subject: [PATCH 2/3] Change replica type from int to int32 Signed-off-by: kerthcet --- api/leaderworkerset/v1/leaderworkerset_types.go 
| 6 +++--- .../leaderworkerset.x-k8s.io_leaderworkersets.yaml | 3 +++ pkg/controllers/leaderworkerset_controller.go | 12 ++++++------ test/e2e/suite_test.go | 2 +- test/integration/controllers/leaderworkerset_test.go | 12 ++++++------ test/testutils/validators.go | 4 ++-- 6 files changed, 21 insertions(+), 18 deletions(-) diff --git a/api/leaderworkerset/v1/leaderworkerset_types.go b/api/leaderworkerset/v1/leaderworkerset_types.go index c6ebcdbb..3312e006 100644 --- a/api/leaderworkerset/v1/leaderworkerset_types.go +++ b/api/leaderworkerset/v1/leaderworkerset_types.go @@ -185,13 +185,13 @@ type LeaderWorkerSetStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` // ReadyReplicas track the number of groups that are in ready state. - ReadyReplicas int `json:"readyReplicas,omitempty"` + ReadyReplicas int32 `json:"readyReplicas,omitempty"` // UpdatedReplicas track the number of groups that have been updated. - UpdatedReplicas int `json:"updatedReplicas,omitempty"` + UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` // Replicas track the active total number of groups. - Replicas int `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty"` // HPAPodSelector for pods that belong to the LeaderWorkerSet object, this is // needed for HPA to know what pods belong to the LeaderWorkerSet object. Here diff --git a/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml b/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml index c6855ed0..6b123d5e 100644 --- a/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml +++ b/config/crd/bases/leaderworkerset.x-k8s.io_leaderworkersets.yaml @@ -15403,13 +15403,16 @@ spec: readyReplicas: description: ReadyReplicas track the number of groups that are in ready state. + format: int32 type: integer replicas: description: Replicas track the active total number of groups. + format: int32 type: integer updatedReplicas: description: UpdatedReplicas track the number of groups that have been updated. 
+ format: int32 type: integer type: object type: object diff --git a/pkg/controllers/leaderworkerset_controller.go b/pkg/controllers/leaderworkerset_controller.go index bf01777d..6213efb3 100644 --- a/pkg/controllers/leaderworkerset_controller.go +++ b/pkg/controllers/leaderworkerset_controller.go @@ -376,13 +376,13 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l } } - if lws.Status.ReadyReplicas != readyCount { - lws.Status.ReadyReplicas = readyCount + if lws.Status.ReadyReplicas != int32(readyCount) { + lws.Status.ReadyReplicas = int32(readyCount) updateStatus = true } - if lws.Status.UpdatedReplicas != updatedCount { - lws.Status.UpdatedReplicas = updatedCount + if lws.Status.UpdatedReplicas != int32(updatedCount) { + lws.Status.UpdatedReplicas = int32(updatedCount) updateStatus = true } @@ -409,8 +409,8 @@ func (r *LeaderWorkerSetReconciler) updateStatus(ctx context.Context, lws *leade // retrieve the current number of replicas -- the number of leaders replicas := int(*sts.Spec.Replicas) - if lws.Status.Replicas != replicas { - lws.Status.Replicas = replicas + if lws.Status.Replicas != int32(replicas) { + lws.Status.Replicas = int32(replicas) updateStatus = true } diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 4abc2ec1..7f11540e 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -40,7 +40,7 @@ import ( ) const ( - timeout = 1 * time.Minute + timeout = 30 * time.Second interval = time.Millisecond * 250 ) diff --git a/test/integration/controllers/leaderworkerset_test.go b/test/integration/controllers/leaderworkerset_test.go index d0deed74..1a8e8c8b 100644 --- a/test/integration/controllers/leaderworkerset_test.go +++ b/test/integration/controllers/leaderworkerset_test.go @@ -287,7 +287,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { var scale v1.Scale gomega.Expect(k8sClient.SubResource("scale").Get(ctx, lws, &scale)).To(gomega.Succeed()) gomega.Expect(int32(scale.Spec.Replicas)).To(gomega.Equal(*lws.Spec.Replicas)) - gomega.Expect(int(scale.Status.Replicas)).To(gomega.Equal(lws.Status.Replicas)) + gomega.Expect(int32(scale.Status.Replicas)).To(gomega.Equal(lws.Status.Replicas)) gomega.Expect(lws.Status.HPAPodSelector).To(gomega.Equal("leaderworkerset.sigs.k8s.io/name=test-sample,leaderworkerset.sigs.k8s.io/worker-index=0")) }, }, @@ -301,7 +301,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { var scale v1.Scale gomega.Expect(k8sClient.SubResource("scale").Get(ctx, lws, &scale)).To(gomega.Succeed()) gomega.Expect(int32(scale.Spec.Replicas)).To(gomega.Equal(*lws.Spec.Replicas)) - gomega.Expect(int(scale.Status.Replicas)).To(gomega.Equal(lws.Status.Replicas)) + gomega.Expect(int32(scale.Status.Replicas)).To(gomega.Equal(lws.Status.Replicas)) gomega.Expect(lws.Status.HPAPodSelector).To(gomega.Equal("leaderworkerset.sigs.k8s.io/name=test-sample,leaderworkerset.sigs.k8s.io/worker-index=0")) }, }, @@ -319,13 +319,13 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { gomega.Expect(k8sClient.SubResource("scale").Update(ctx, lwsUnstructed, client.WithSubResourceBody(scaleUnstructed))).To(gomega.Succeed()) }, checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { - gomega.Eventually(func() (int, error) { + gomega.Eventually(func() (int32, error) { var leaderWorkerSet leaderworkerset.LeaderWorkerSet if err := k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderWorkerSet); err != nil { return -1, err } return 
leaderWorkerSet.Status.Replicas, nil - }, testing.Timeout, testing.Interval).Should(gomega.Equal(3)) + }, testing.Timeout, testing.Interval).Should(gomega.Equal(int32(3))) }, }, }, @@ -451,13 +451,13 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() { updates: []*update{ { checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) { - gomega.Eventually(func() (int, error) { + gomega.Eventually(func() (int32, error) { var leaderWorkerSet leaderworkerset.LeaderWorkerSet if err := k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderWorkerSet); err != nil { return -1, err } return leaderWorkerSet.Status.Replicas, nil - }, testing.Timeout, testing.Interval).Should(gomega.Equal(2)) + }, testing.Timeout, testing.Interval).Should(gomega.Equal(int32(2))) testing.ExpectValidLeaderStatefulSet(ctx, lws, k8sClient) testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true) testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing") diff --git a/test/testutils/validators.go b/test/testutils/validators.go index 542f4ffe..f19d343d 100644 --- a/test/testutils/validators.go +++ b/test/testutils/validators.go @@ -328,10 +328,10 @@ func ExpectLeaderWorkerSetStatusReplicas(ctx context.Context, k8sClient client.C if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: lws.Namespace, Name: lws.Name}, lws); err != nil { return err } - if lws.Status.ReadyReplicas != readyReplicas { + if lws.Status.ReadyReplicas != int32(readyReplicas) { return fmt.Errorf("readyReplicas in status not match, want: %d, got %d", readyReplicas, lws.Status.ReadyReplicas) } - if lws.Status.UpdatedReplicas != updatedReplicas { + if lws.Status.UpdatedReplicas != int32(updatedReplicas) { return fmt.Errorf("updatedReplicas in status not match, want: %d, got %d", updatedReplicas, lws.Status.UpdatedReplicas) } return nil From 832e8611eb6704e8349ffca80b9b23d66eded176 Mon Sep 17 00:00:00 2001 From: kerthcet Date: Sat, 6 Apr 2024 12:36:31 +0800 Subject: [PATCH 3/3] fix comments Signed-off-by: kerthcet --- pkg/controllers/leaderworkerset_controller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/leaderworkerset_controller.go b/pkg/controllers/leaderworkerset_controller.go index 6213efb3..0e3abebc 100644 --- a/pkg/controllers/leaderworkerset_controller.go +++ b/pkg/controllers/leaderworkerset_controller.go @@ -355,7 +355,6 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l continue } - var replicaReady bool // this is the worker statefulset. if statefulsetutils.StatefulsetReady(sts) { @@ -367,11 +366,11 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l return false, err } if podutils.PodRunningAndReady(leaderPod) { - replicaReady = true readyCount++ - } - if replicaReady && sts.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash && leaderPod.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash { - updatedCount++ + + if sts.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash && leaderPod.Labels[leaderworkerset.TemplateRevisionHashKey] == templateHash { + updatedCount++ + } } } }
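
Note on consuming the new status fields (an illustrative sketch, not part of the patches above): with this series applied, the Available condition is driven by updatedCount rather than readyCount, so status.updatedReplicas == spec.replicas is the signal that a rolling update has finished, while status.readyReplicas alone can sit at spec.replicas with old-template groups still serving. The sketch below shows how a controller-runtime client might gate on that; the helper name waitForRolloutComplete, the poll interval/timeout, and the sigs.k8s.io/lws/api/leaderworkerset/v1 import path are assumptions for the example, not definitions from this series.

package example

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"

	leaderworkerset "sigs.k8s.io/lws/api/leaderworkerset/v1"
)

// waitForRolloutComplete (hypothetical helper) polls a LeaderWorkerSet until
// every group is both ready and running the current template revision, which
// is how the controller now derives the Available condition.
func waitForRolloutComplete(ctx context.Context, c client.Client, key types.NamespacedName) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
		var lws leaderworkerset.LeaderWorkerSet
		if err := c.Get(ctx, key, &lws); err != nil {
			return false, err
		}
		if lws.Spec.Replicas == nil {
			return false, fmt.Errorf("spec.replicas is not set")
		}
		want := *lws.Spec.Replicas
		// Both status counters are int32 after patch 2, so they compare
		// directly against *spec.replicas.
		return lws.Status.UpdatedReplicas == want && lws.Status.ReadyReplicas == want, nil
	})
}

This mirrors the integration tests above: right after a template update the status can report readyReplicas=4, updatedReplicas=0, and the Available condition stays false until every group carries the new template hash.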