From dffff4f557f4f5b434eb7c9bc4fc91b0386c7846 Mon Sep 17 00:00:00 2001
From: Yaroslava Serdiuk
Date: Wed, 28 Feb 2024 12:14:49 +0200
Subject: [PATCH] Add ProvisioningRequest injector (#6529)

* Add ProvisioningRequests injector

* Add test case for Accepted conditions and add supported provreq classes list

* Use Passive clock
---
 cluster-autoscaler/go.mod                      |   2 +-
 cluster-autoscaler/main.go                     |   5 +
 .../provreq/provisioning_request_injector.go   | 101 +++++++++++++
 .../provisioning_request_injector_test.go      | 139 ++++++++++++++++++
 .../autoscaling.x-k8s.io/v1beta1/types.go      |   5 +
 .../checkcapacity/orchestrator.go              |   9 +-
 .../checkcapacity/processor.go                 |   5 +-
 .../checkcapacity/processor_test.go            |  33 +++--
 .../condition_test.go                          |  17 ++-
 .../condition.go => conditions/conditions.go}  |  44 +++---
 10 files changed, 313 insertions(+), 47 deletions(-)
 create mode 100644 cluster-autoscaler/processors/provreq/provisioning_request_injector.go
 create mode 100644 cluster-autoscaler/processors/provreq/provisioning_request_injector_test.go
 rename cluster-autoscaler/provisioningrequest/{checkcapacity => conditions}/condition_test.go (93%)
 rename cluster-autoscaler/provisioningrequest/{checkcapacity/condition.go => conditions/conditions.go} (65%)

diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 8331f14305ca..2c2823e05990 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -1,6 +1,6 @@
 module k8s.io/autoscaler/cluster-autoscaler
 
-go 1.21
+go 1.21.3
 
 require (
 	cloud.google.com/go/compute/metadata v0.2.3
diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index 8f388dbf0d27..92555ac6acdc 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -503,6 +503,11 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
 			return nil, err
 		}
 		opts.LoopStartNotifier = loopstart.NewObserversList([]loopstart.Observer{provreqProcesor})
+		injector, err := provreq.NewProvisioningRequestPodsInjector(restConfig)
+		if err != nil {
+			return nil, err
+		}
+		podListProcessor.AddProcessor(injector)
 	}
 	opts.Processors.PodListProcessor = podListProcessor
 	scaleDownCandidatesComparers := []scaledowncandidates.CandidatesComparer{}
diff --git a/cluster-autoscaler/processors/provreq/provisioning_request_injector.go b/cluster-autoscaler/processors/provreq/provisioning_request_injector.go
new file mode 100644
index 000000000000..3af5f177eafa
--- /dev/null
+++ b/cluster-autoscaler/processors/provreq/provisioning_request_injector.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provreq
+
+import (
+	"time"
+
+	apiv1 "k8s.io/api/core/v1"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	provreqconditions "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions"
+	provreqpods "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+)
+
+const (
+	defaultRetryTime = 10 * time.Minute
+)
+
+// SupportedProvisioningClasses is a list of supported ProvisioningClasses in ClusterAutoscaler.
+var SupportedProvisioningClasses = []string{v1beta1.ProvisioningClassCheckCapacity}
+
+// ProvisioningRequestPodsInjector creates in-memory pods from a ProvisioningRequest and injects them into the unscheduled pods list.
+type ProvisioningRequestPodsInjector struct {
+	client provisioningRequestClient
+	clock  clock.PassiveClock
+}
+
+// Process picks one ProvisioningRequest, updates its Accepted condition and injects its pods into the unscheduled pods list.
+func (p *ProvisioningRequestPodsInjector) Process(
+	_ *context.AutoscalingContext,
+	unschedulablePods []*apiv1.Pod,
+) ([]*apiv1.Pod, error) {
+	provReqs, err := p.client.ProvisioningRequests()
+	if err != nil {
+		return nil, err
+	}
+	for _, pr := range provReqs {
+		conditions := pr.Conditions()
+		if apimeta.IsStatusConditionTrue(conditions, v1beta1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1beta1.Provisioned) {
+			continue
+		}
+
+		provisioned := apimeta.FindStatusCondition(conditions, v1beta1.Provisioned)
+
+		// TODO(yaroslava): support exponential backoff
+		// Inject pods if the ProvReq wasn't scaled up before, or if it has had the Provisioned == False condition for more than defaultRetryTime.
+		inject := true
+		if provisioned != nil {
+			if provisioned.Status == metav1.ConditionFalse && provisioned.LastTransitionTime.Add(defaultRetryTime).Before(p.clock.Now()) {
+				inject = true
+			} else {
+				inject = false
+			}
+		}
+		if inject {
+			provreqpods, err := provreqpods.PodsForProvisioningRequest(pr)
+			if err != nil {
+				klog.Errorf("Failed to get pods for ProvisioningRequest %v", pr.Name())
+				provreqconditions.AddOrUpdateCondition(pr, v1beta1.Failed, metav1.ConditionTrue, provreqconditions.FailedToCreatePodsReason, err.Error(), metav1.NewTime(p.clock.Now()))
+				continue
+			}
+			unschedulablePods := append(unschedulablePods, provreqpods...)
+			provreqconditions.AddOrUpdateCondition(pr, v1beta1.Accepted, metav1.ConditionTrue, provreqconditions.AcceptedReason, provreqconditions.AcceptedMsg, metav1.NewTime(p.clock.Now()))
+			return unschedulablePods, nil
+		}
+	}
+	return unschedulablePods, nil
+}
+
+// CleanUp cleans up the processor's internal structures.
+func (p *ProvisioningRequestPodsInjector) CleanUp() {}
+
+// NewProvisioningRequestPodsInjector creates a ProvisioningRequest filter processor.
+func NewProvisioningRequestPodsInjector(kubeConfig *rest.Config) (pods.PodListProcessor, error) {
+	client, err := provreqclient.NewProvisioningRequestClient(kubeConfig)
+	if err != nil {
+		return nil, err
+	}
+	return &ProvisioningRequestPodsInjector{client: client, clock: clock.RealClock{}}, nil
+}
diff --git a/cluster-autoscaler/processors/provreq/provisioning_request_injector_test.go b/cluster-autoscaler/processors/provreq/provisioning_request_injector_test.go
new file mode 100644
index 000000000000..f379ffdad5cc
--- /dev/null
+++ b/cluster-autoscaler/processors/provreq/provisioning_request_injector_test.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provreq
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
+	clock "k8s.io/utils/clock/testing"
+)
+
+func TestProvisioningRequestPodsInjector(t *testing.T) {
+	now := time.Now()
+	minAgo := now.Add(-1 * time.Minute)
+	hourAgo := now.Add(-1 * time.Hour)
+
+	accepted := metav1.Condition{
+		Type:               v1beta1.Accepted,
+		Status:             metav1.ConditionTrue,
+		LastTransitionTime: metav1.NewTime(minAgo),
+	}
+	failed := metav1.Condition{
+		Type:               v1beta1.Failed,
+		Status:             metav1.ConditionTrue,
+		LastTransitionTime: metav1.NewTime(hourAgo),
+	}
+	provisioned := metav1.Condition{
+		Type:               v1beta1.Provisioned,
+		Status:             metav1.ConditionTrue,
+		LastTransitionTime: metav1.NewTime(hourAgo),
+	}
+	notProvisioned := metav1.Condition{
+		Type:               v1beta1.Provisioned,
+		Status:             metav1.ConditionFalse,
+		LastTransitionTime: metav1.NewTime(hourAgo),
+	}
+	unknownProvisioned := metav1.Condition{
+		Type:               v1beta1.Provisioned,
+		Status:             metav1.ConditionUnknown,
+		LastTransitionTime: metav1.NewTime(hourAgo),
+	}
+	notProvisionedRecently := metav1.Condition{
+		Type:               v1beta1.Provisioned,
+		Status:             metav1.ConditionFalse,
+		LastTransitionTime: metav1.NewTime(minAgo),
+	}
+
+	podsA := 10
+	newProvReqA := testProvisioningRequestWithCondition("new", podsA)
+	newAcceptedProvReqA := testProvisioningRequestWithCondition("new-accepted", podsA, accepted)
+
+	podsB := 20
+	notProvisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-false-B", podsB, notProvisioned, accepted)
+	provisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-and-accepted", podsB, provisioned, accepted)
+	failedProvReq := testProvisioningRequestWithCondition("failed", podsA, failed)
+	notProvisionedRecentlyProvReqB := testProvisioningRequestWithCondition("provisioned-false-recently-B", podsB, notProvisionedRecently)
+	unknownProvisionedProvReqB := testProvisioningRequestWithCondition("provisioned-unknown-B", podsB, unknownProvisioned)
+
+	testCases := []struct {
+		name                     string
+		provReqs                 []*provreqwrapper.ProvisioningRequest
+		wantUnscheduledPodCount  int
+		wantUpdatedConditionName string
+	}{
+		{
+			name:                     "New ProvisioningRequest, pods are injected and Accepted condition is added",
+			provReqs:                 []*provreqwrapper.ProvisioningRequest{newProvReqA, provisionedAcceptedProvReqB},
+			wantUnscheduledPodCount:  podsA,
+			wantUpdatedConditionName: newProvReqA.Name(),
+		},
+		{
+			name:                     "New ProvisioningRequest, pods are injected and Accepted condition is updated",
+			provReqs:                 []*provreqwrapper.ProvisioningRequest{newAcceptedProvReqA, provisionedAcceptedProvReqB},
+			wantUnscheduledPodCount:  podsA,
+			wantUpdatedConditionName: newAcceptedProvReqA.Name(),
+		},
+		{
+			name:                     "Provisioned=False, pods are injected",
+			provReqs:                 []*provreqwrapper.ProvisioningRequest{notProvisionedAcceptedProvReqB, failedProvReq},
+			wantUnscheduledPodCount:  podsB,
+			wantUpdatedConditionName: notProvisionedAcceptedProvReqB.Name(),
+		},
+		{
+			name:     "Provisioned=True, no pods are injected",
+			provReqs: []*provreqwrapper.ProvisioningRequest{provisionedAcceptedProvReqB, failedProvReq, notProvisionedRecentlyProvReqB},
+		},
+		{
+			name:     "Provisioned=Unknown, no pods are injected",
+			provReqs: []*provreqwrapper.ProvisioningRequest{unknownProvisionedProvReqB, failedProvReq, notProvisionedRecentlyProvReqB},
+		},
+	}
+	for _, tc := range testCases {
+		client := provreqclient.NewFakeProvisioningRequestClient(context.Background(), t, tc.provReqs...)
+		injector := ProvisioningRequestPodsInjector{client, clock.NewFakePassiveClock(now)}
+		getUnscheduledPods, err := injector.Process(nil, []*v1.Pod{})
+		if err != nil {
+			t.Errorf("%s failed: injector.Process returned error %v", tc.name, err)
+		}
+		if len(getUnscheduledPods) != tc.wantUnscheduledPodCount {
+			t.Errorf("%s failed: injector.Process returned %d unscheduled pods, want %d", tc.name, len(getUnscheduledPods), tc.wantUnscheduledPodCount)
+		}
+		if tc.wantUpdatedConditionName == "" {
+			continue
+		}
+		pr, _ := client.ProvisioningRequest("ns", tc.wantUpdatedConditionName)
+		accepted := apimeta.FindStatusCondition(pr.Conditions(), v1beta1.Accepted)
+		if accepted == nil || accepted.LastTransitionTime != metav1.NewTime(now) {
+			t.Errorf("%s: injector.Process hasn't updated the Accepted condition for ProvisioningRequest %s", tc.name, tc.wantUpdatedConditionName)
+		}
+	}
+
+}
+
+func testProvisioningRequestWithCondition(name string, podCount int, conditions ...metav1.Condition) *provreqwrapper.ProvisioningRequest {
+	pr := provreqwrapper.BuildTestProvisioningRequest("ns", name, "10", "100", "", int32(podCount), false, time.Now(), "ProvisioningClass")
+	pr.V1Beta1().Status.Conditions = conditions
+	return pr
+}
diff --git a/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1/types.go b/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1/types.go
index ee6b93e7d31e..1b022e3ba1b2 100644
--- a/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1/types.go
+++ b/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1/types.go
@@ -175,9 +175,14 @@ type Detail string
 // The following constants list all currently available Conditions Type values.
 // See: https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition
 const (
+	// Accepted indicates that the ProvisioningRequest was accepted by ClusterAutoscaler,
+	// so ClusterAutoscaler will attempt to provision the nodes for it.
+	Accepted string = "Accepted"
 	// BookingExpired indicates that the ProvisioningRequest had Provisioned condition before
 	// and capacity reservation time is expired.
 	BookingExpired string = "BookingExpired"
+	// CapacityRevoked indicates that requested resources are no longer valid.
+	CapacityRevoked string = "CapacityRevoked"
 	// Provisioned indicates that all of the requested resources were created
 	// and are available in the cluster. CA will set this condition when the
 	// VM creation finishes successfully.
diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/orchestrator.go b/cluster-autoscaler/provisioningrequest/checkcapacity/orchestrator.go
index b4766fb03085..9e23f37a7fdc 100644
--- a/cluster-autoscaler/provisioningrequest/checkcapacity/orchestrator.go
+++ b/cluster-autoscaler/provisioningrequest/checkcapacity/orchestrator.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions"
 	provreq_pods "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
@@ -121,13 +122,13 @@ func (o *provReqOrchestrator) bookCapacity() error {
 	}
 	podsToCreate := []*apiv1.Pod{}
 	for _, provReq := range provReqs {
-		if shouldCapacityBeBooked(provReq) {
+		if conditions.ShouldCapacityBeBooked(provReq) {
 			pods, err := provreq_pods.PodsForProvisioningRequest(provReq)
 			if err != nil {
 				// ClusterAutoscaler was able to create pods before, so we shouldn't have error here.
 				// If there is an error, mark PR as invalid, because we won't be able to book capacity
 				// for it anyway.
-				setCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, FailedToBookCapacityReason, fmt.Sprintf("Couldn't create pods, err: %v", err), metav1.Now())
+				conditions.AddOrUpdateCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, conditions.FailedToBookCapacityReason, fmt.Sprintf("Couldn't create pods, err: %v", err), metav1.Now())
 				continue
 			}
 			podsToCreate = append(podsToCreate, pods...)
@@ -151,10 +152,10 @@ func (o *provReqOrchestrator) scaleUp(unschedulablePods []*apiv1.Pod) (bool, err
 	}
 	st, _, err := o.injector.TrySchedulePods(o.context.ClusterSnapshot, unschedulablePods, scheduling.ScheduleAnywhere, true)
 	if len(st) < len(unschedulablePods) || err != nil {
-		setCondition(provReq, v1beta1.Provisioned, metav1.ConditionFalse, CapacityIsFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now())
+		conditions.AddOrUpdateCondition(provReq, v1beta1.Provisioned, metav1.ConditionFalse, conditions.CapacityIsNotFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now())
 		return false, err
 	}
-	setCondition(provReq, v1beta1.Provisioned, metav1.ConditionTrue, CapacityIsFoundReason, "Capacity is found in the cluster.", metav1.Now())
+	conditions.AddOrUpdateCondition(provReq, v1beta1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now())
 	return true, nil
 }
 
diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/processor.go b/cluster-autoscaler/provisioningrequest/checkcapacity/processor.go
index 95d29c7b867c..c367048c5d7c 100644
--- a/cluster-autoscaler/provisioningrequest/checkcapacity/processor.go
+++ b/cluster-autoscaler/provisioningrequest/checkcapacity/processor.go
@@ -22,6 +22,7 @@ import (
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
 )
 
@@ -75,14 +76,14 @@ func (p *checkCapacityProcessor) Process(provReqs []*provreqwrapper.Provisioning
 		if updated >= p.maxUpdated {
 			break
 		}
-		setCondition(provReq, v1beta1.BookingExpired, metav1.ConditionTrue, CapacityReservationTimeExpiredReason, CapacityReservationTimeExpiredMsg, metav1.NewTime(p.now()))
+		conditions.AddOrUpdateCondition(provReq, v1beta1.BookingExpired, metav1.ConditionTrue, conditions.CapacityReservationTimeExpiredReason, conditions.CapacityReservationTimeExpiredMsg, metav1.NewTime(p.now()))
 		updated++
 	}
 	for _, provReq := range failedProvReq {
 		if updated >= p.maxUpdated {
 			break
 		}
-		setCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, ExpiredReason, ExpiredMsg, metav1.NewTime(p.now()))
+		conditions.AddOrUpdateCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, conditions.ExpiredReason, conditions.ExpiredMsg, metav1.NewTime(p.now()))
 		updated++
 	}
 }
diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/processor_test.go b/cluster-autoscaler/provisioningrequest/checkcapacity/processor_test.go
index a225a3fa49d7..cd8d114edc50 100644
--- a/cluster-autoscaler/provisioningrequest/checkcapacity/processor_test.go
+++ b/cluster-autoscaler/provisioningrequest/checkcapacity/processor_test.go
@@ -24,6 +24,7 @@
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
 )
@@ -51,8 +52,8 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Failed,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(now),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 			},
 		},
@@ -64,8 +65,8 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Provisioned,
 					Status:             metav1.ConditionFalse,
 					LastTransitionTime: metav1.NewTime(dayAgo),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 			},
 			wantConditions: []metav1.Condition{
@@ -73,15 +74,15 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Provisioned,
 					Status:             metav1.ConditionFalse,
 					LastTransitionTime: metav1.NewTime(dayAgo),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 				{
 					Type:               v1beta1.Failed,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(now),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 			},
 		},
@@ -93,8 +94,8 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Provisioned,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(dayAgo),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 			},
 			wantConditions: []metav1.Condition{
@@ -102,15 +103,15 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Provisioned,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(dayAgo),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 				{
 					Type:               v1beta1.BookingExpired,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(now),
-					Reason:             CapacityReservationTimeExpiredReason,
-					Message:            CapacityReservationTimeExpiredMsg,
+					Reason:             conditions.CapacityReservationTimeExpiredReason,
+					Message:            conditions.CapacityReservationTimeExpiredMsg,
 				},
 			},
 		},
@@ -154,8 +155,8 @@ func TestProcess(t *testing.T) {
 					Type:               v1beta1.Failed,
 					Status:             metav1.ConditionTrue,
 					LastTransitionTime: metav1.NewTime(now),
-					Reason:             ExpiredReason,
-					Message:            ExpiredMsg,
+					Reason:             conditions.ExpiredReason,
+					Message:            conditions.ExpiredMsg,
 				},
 			}, additionalPr.Conditions())
 		} else {
diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/condition_test.go b/cluster-autoscaler/provisioningrequest/conditions/condition_test.go
similarity index 93%
rename from cluster-autoscaler/provisioningrequest/checkcapacity/condition_test.go
rename to cluster-autoscaler/provisioningrequest/conditions/condition_test.go
index f95b410637f5..f453ec07a366 100644
--- a/cluster-autoscaler/provisioningrequest/checkcapacity/condition_test.go
+++ b/cluster-autoscaler/provisioningrequest/conditions/condition_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package checkcapacity
+package conditions
 
 import (
 	"testing"
@@ -98,7 +98,7 @@ func TestBookCapacity(t *testing.T) {
 					Conditions: test.prConditions,
 				},
 			}, nil)
-			got := shouldCapacityBeBooked(pr)
+			got := ShouldCapacityBeBooked(pr)
 			if got != test.want {
 				t.Errorf("Want: %v, got: %v", test.want, got)
 			}
@@ -114,6 +114,17 @@ func TestSetCondition(t *testing.T) {
 		newStatus     v1.ConditionStatus
 		want          []v1.Condition
 	}{
+		{
+			name:      "Accepted added, empty conditions before",
+			newType:   v1beta1.Accepted,
+			newStatus: v1.ConditionTrue,
+			want: []v1.Condition{
+				{
+					Type:   v1beta1.Accepted,
+					Status: v1.ConditionTrue,
+				},
+			},
+		},
 		{
 			name:      "Provisioned added, empty conditions before",
 			newType:   v1beta1.Provisioned,
@@ -242,7 +253,7 @@ func TestSetCondition(t *testing.T) {
 					Conditions: test.oldConditions,
 				},
 			}, nil)
-			setCondition(pr, test.newType, test.newStatus, "", "", v1.Now())
+			AddOrUpdateCondition(pr, test.newType, test.newStatus, "", "", v1.Now())
 			got := pr.Conditions()
 			if len(got) > 2 || len(got) != len(test.want) || got[0].Type != test.want[0].Type || got[0].Status != test.want[0].Status {
 				t.Errorf("want %v, got: %v", test.want, got)
diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/condition.go b/cluster-autoscaler/provisioningrequest/conditions/conditions.go
similarity index 65%
rename from cluster-autoscaler/provisioningrequest/checkcapacity/condition.go
rename to cluster-autoscaler/provisioningrequest/conditions/conditions.go
index 1638bdfb91e4..11133c6fd965 100644
--- a/cluster-autoscaler/provisioningrequest/checkcapacity/condition.go
+++ b/cluster-autoscaler/provisioningrequest/conditions/conditions.go
@@ -14,20 +14,29 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package checkcapacity
+package conditions
 
 import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
 	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
 	"k8s.io/klog/v2"
 )
 
 const (
+	//AcceptedReason is added when ProvisioningRequest is accepted by ClusterAutoscaler
+	AcceptedReason = "Accepted"
+	//AcceptedMsg is added when ProvisioningRequest is accepted by ClusterAutoscaler
+	AcceptedMsg = "ProvisioningRequest is accepted by ClusterAutoscaler"
 	//CapacityIsNotFoundReason is added when capacity was not found in the cluster.
 	CapacityIsNotFoundReason = "CapacityIsNotFound"
 	//CapacityIsFoundReason is added when capacity was found in the cluster.
 	CapacityIsFoundReason = "CapacityIsFound"
+	// CapacityIsFoundMsg is added when capacity was found in the cluster.
+	CapacityIsFoundMsg = "Capacity is found in the cluster"
+	//FailedToCreatePodsReason is added when CA failed to create pods for ProvisioningRequest.
+	FailedToCreatePodsReason = "FailedToCreatePods"
 	//FailedToBookCapacityReason is added when Cluster Autoscaler failed to book capacity in the cluster.
 	FailedToBookCapacityReason = "FailedToBookCapacity"
 	//CapacityReservationTimeExpiredReason is added whed capacity reservation time is expired.
@@ -40,27 +49,24 @@ const (
 	ExpiredMsg = "ProvisioningRequest is expired"
 )
 
-func shouldCapacityBeBooked(pr *provreqwrapper.ProvisioningRequest) bool {
+// ShouldCapacityBeBooked returns whether capacity should be booked.
+func ShouldCapacityBeBooked(pr *provreqwrapper.ProvisioningRequest) bool {
 	if pr.V1Beta1().Spec.ProvisioningClassName != v1beta1.ProvisioningClassCheckCapacity {
 		return false
 	}
-	if pr.Conditions() == nil || len(pr.Conditions()) == 0 {
+	conditions := pr.Conditions()
+	if apimeta.IsStatusConditionTrue(conditions, v1beta1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1beta1.BookingExpired) {
 		return false
+	} else if apimeta.IsStatusConditionTrue(conditions, v1beta1.Provisioned) {
+		return true
 	}
-	book := false
-	for _, condition := range pr.Conditions() {
-		if checkConditionType(condition, v1beta1.BookingExpired) || checkConditionType(condition, v1beta1.Failed) {
-			return false
-		} else if checkConditionType(condition, v1beta1.Provisioned) {
-			book = true
-		}
-	}
-	return book
+	return false
 }
 
-func setCondition(pr *provreqwrapper.ProvisioningRequest, conditionType string, conditionStatus v1.ConditionStatus, reason, message string, now v1.Time) {
-	var newConditions []v1.Condition
-	newCondition := v1.Condition{
+// AddOrUpdateCondition adds a Condition if it is not present among the ProvisioningRequest conditions, or updates it otherwise.
+func AddOrUpdateCondition(pr *provreqwrapper.ProvisioningRequest, conditionType string, conditionStatus metav1.ConditionStatus, reason, message string, now metav1.Time) {
+	var newConditions []metav1.Condition
+	newCondition := metav1.Condition{
 		Type:               conditionType,
 		Status:             conditionStatus,
 		ObservedGeneration: pr.V1Beta1().GetObjectMeta().GetGeneration(),
@@ -70,7 +76,7 @@ func setCondition(pr *provreqwrapper.ProvisioningRequest, conditionType string,
 	}
 	prevConditions := pr.Conditions()
 	switch conditionType {
-	case v1beta1.Provisioned, v1beta1.BookingExpired, v1beta1.Failed:
+	case v1beta1.Provisioned, v1beta1.BookingExpired, v1beta1.Failed, v1beta1.Accepted:
 		conditionFound := false
 		for _, condition := range prevConditions {
 			if condition.Type == conditionType {
@@ -89,7 +95,3 @@ func setCondition(pr *provreqwrapper.ProvisioningRequest, conditionType string,
 	}
 	pr.SetConditions(newConditions)
 }
-
-func checkConditionType(condition v1.Condition, conditionType string) bool {
-	return condition.Type == conditionType && condition.Status == v1.ConditionTrue
-}
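
A minimal sketch of how the new processor gets wired in, mirroring the buildAutoscaler hunk above. It is illustrative only and not part of the patch: podListProcessorRegistry is a hypothetical local interface standing in for the concrete pod-list-processor type used in main.go, while provreq.NewProvisioningRequestPodsInjector and AddProcessor come straight from the diff.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
	"k8s.io/autoscaler/cluster-autoscaler/processors/provreq"
	"k8s.io/client-go/rest"
)

// podListProcessorRegistry is a hypothetical stand-in for the processor list
// used in main.go; the only behavior this sketch needs is AddProcessor.
type podListProcessorRegistry interface {
	AddProcessor(processor pods.PodListProcessor)
}

// addProvisioningRequestInjector builds the injector from a rest.Config and
// registers it, so ProvisioningRequest pods are treated like ordinary
// unschedulable pods during scale-up, as in the main.go change above.
func addProvisioningRequestInjector(plp podListProcessorRegistry, restConfig *rest.Config) error {
	injector, err := provreq.NewProvisioningRequestPodsInjector(restConfig)
	if err != nil {
		return err
	}
	plp.AddProcessor(injector)
	return nil
}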