diff --git a/cluster-autoscaler/core/scaleup/orchestrator/provreq/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/provreq/orchestrator.go
new file mode 100644
index 000000000000..8786663fa609
--- /dev/null
+++ b/cluster-autoscaler/core/scaleup/orchestrator/provreq/orchestrator.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provreq
+
+import (
+	appsv1 "k8s.io/api/apps/v1"
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/bookcapacity"
+	provreq_pods "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+
+	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
+	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+type provisioningRequestClient interface {
+	ProvisioningRequests() ([]*provreqwrapper.ProvisioningRequest, error)
+	ProvisioningRequest(namespace, name string) (*provreqwrapper.ProvisioningRequest, error)
+}
+
+// provReqOrchestrator is a scale-up orchestrator that handles only pods
+// created for ProvisioningRequests.
+type provReqOrchestrator struct {
+	context  *context.AutoscalingContext
+	client   provisioningRequestClient
+	injector *scheduling.HintingSimulator
+}
+
+// New returns a ProvisioningRequest-aware scale-up orchestrator.
+func New(kubeConfig *rest.Config) (*provReqOrchestrator, error) {
+	client, err := provreqclient.NewProvisioningRequestClient(kubeConfig)
+	if err != nil {
+		return nil, err
+	}
+	return &provReqOrchestrator{client: client}, nil
+}
+
+// Initialize initializes the orchestrator object with required fields.
+func (o *provReqOrchestrator) Initialize(
+	autoscalingContext *context.AutoscalingContext,
+	processors *ca_processors.AutoscalingProcessors,
+	clusterStateRegistry *clusterstate.ClusterStateRegistry,
+	taintConfig taints.TaintConfig,
+) {
+	o.context = autoscalingContext
+	o.injector = scheduling.NewHintingSimulator(autoscalingContext.PredicateChecker)
+}
+
+// ScaleUp checks whether the capacity requested by the ProvisioningRequest
+// that owns the given unschedulable pods fits into the cluster, and updates
+// the request's conditions accordingly.
+func (o *provReqOrchestrator) ScaleUp(
+	unschedulablePods []*apiv1.Pod,
+	nodes []*apiv1.Node,
+	daemonSets []*appsv1.DaemonSet,
+	nodeInfos map[string]*schedulerframework.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) {
+	provReqs, err := o.client.ProvisioningRequests()
+	if err != nil {
+		return nil, errors.ToAutoscalerError(errors.ApiCallError, err)
+	}
+	podsToCreate := []*apiv1.Pod{}
+	for _, provReq := range provReqs {
+		if bookcapacity.BookCapacity(provReq) {
+			pods, err := provreq_pods.PodsForProvisioningRequest(provReq)
+			if err != nil {
+				klog.Errorf("Failed to create pods for ProvisioningRequest %s/%s: %v", provReq.Namespace(), provReq.Name(), err)
+				continue
+			}
+			podsToCreate = append(podsToCreate, pods...)
+		}
+	}
+	o.context.ClusterSnapshot.Fork()
+	defer o.context.ClusterSnapshot.Revert()
+	// Schedule these pods first to reserve capacity for ProvisioningRequests
+	// that already have the BookCapacity condition.
+	o.injector.TrySchedulePods(o.context.ClusterSnapshot, podsToCreate, scheduling.ScheduleAnywhere, false)
+
+	unschedulablePods = bookcapacity.FilterBookCapacityClass(unschedulablePods)
+	if len(unschedulablePods) == 0 || len(unschedulablePods[0].OwnerReferences) == 0 {
+		return nil, nil
+	}
+	provReq, err := o.client.ProvisioningRequest(unschedulablePods[0].Namespace, unschedulablePods[0].OwnerReferences[0].Name)
+	if err != nil {
+		return nil, errors.ToAutoscalerError(errors.ApiCallError, err)
+	}
+	_, _, err = o.injector.TrySchedulePods(o.context.ClusterSnapshot, unschedulablePods, scheduling.ScheduleAnywhere, true)
+	if err == nil {
+		bookcapacity.SetCondition(provReq, bookcapacity.BookCapacityCondition, "Capacity is found", "")
+		// TODO(provreq): return a populated ScaleUpStatus.
+		return nil, nil
+	}
+	// TODO(provreq): check whether the scheduling error is persistent.
+	bookcapacity.SetCondition(provReq, bookcapacity.PendingCondition, "Capacity is not found", "")
+	// TODO(provreq): return a populated ScaleUpStatus and surface the error.
+	return nil, nil
+}
+
+// ScaleUpToNodeGroupMinSize is a no-op for the ProvisioningRequest orchestrator.
+func (o *provReqOrchestrator) ScaleUpToNodeGroupMinSize(
+	nodes []*apiv1.Node,
+	nodeInfos map[string]*schedulerframework.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) {
+	return nil, nil
+}
\ No newline at end of file
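Review note: a minimal sketch of driving the new orchestrator directly, assuming the caller already holds a *rest.Config and the usual scale-up inputs; all variable names below are hypothetical and the fragment is meant to run inside a caller that can return an error:

	orch, err := provreq.New(restConfig)
	if err != nil {
		return err
	}
	orch.Initialize(autoscalingCtx, processors, clusterStateRegistry, taintConfig)
	// unschedulablePods is expected to hold only pods generated for a single
	// book-capacity ProvisioningRequest; the call books capacity in the
	// cluster snapshot rather than provisioning nodes.
	scaleUpStatus, scaleUpErr := orch.ScaleUp(unschedulablePods, nodes, daemonSets, nodeInfos)
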
+ } + } + o.context.ClusterSnapshot.Fork() + defer o.context.ClusterSnapshot.Revert() + // scheduling the pods to reserve capacity for provisioning request with BookCapacity condition + o.injector.TrySchedulePods(o.context.ClusterSnapshot, podsToCreate, scheduling.ScheduleAnywhere, false) + + unschedulablePods = bookcapacity.FilterBookCapacityClass(unschedulablePods) + provReq, err := o.client.ProvisioningRequest(unschedulablePods[0].Namespace, unschedulablePods[0].OwnerReferences[0].Name) + if err != nil { + //TODO return error + } + _, _, err = o.injector.TrySchedulePods(o.context.ClusterSnapshot, unschedulablePods, scheduling.ScheduleAnywhere, true) + if err == nil { + bookcapacity.SetCondition(provReq, bookcapacity.BookCapacityCondition, "Capacity is found", "") + //TODO return status? + return nil, nil + } + //TODO check if error is persistent + + bookcapacity.SetCondition(provReq, bookcapacity.PendingCondition, "Capacity is not found", "") + //TODO return status, error? + return nil, nil +} + +func (o *provReqOrchestrator) ScaleUpToNodeGroupMinSize( + nodes []*apiv1.Node, + nodeInfos map[string]*schedulerframework.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) { + return nil, nil +} \ No newline at end of file diff --git a/cluster-autoscaler/core/scaleup/orchestrator/wrapper_orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/wrapper_orchestrator.go new file mode 100644 index 000000000000..90e25996d764 --- /dev/null +++ b/cluster-autoscaler/core/scaleup/orchestrator/wrapper_orchestrator.go @@ -0,0 +1,110 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package orchestrator + +import ( + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/clusterstate" + "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/core/scaleup" + "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator/provreq" + ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" + "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/autoscaler/cluster-autoscaler/utils/taints" + "k8s.io/client-go/rest" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +const ( + consumeProvReq = "cluster-autoscaler.kubernetes.io/consume-provisioning-request" +) + +type WrapperOrchestrator struct { + scaleUpRegularPods bool + scaleUpOrchestrator scaleup.Orchestrator + provReqOrchestrator scaleup.Orchestrator +} + +func NewWrapperOrchestrator(kubeConfig *rest.Config) scaleup.Orchestrator { + provReqOrchestrator, err := provreq.New(kubeConfig) + if err != nil { + return &WrapperOrchestrator{ + scaleUpOrchestrator: New(), + provReqOrchestrator: provReqOrchestrator, + } + } + // log error or return error? + return New() + +} + +// Initialize initializes the orchestrator object with required fields. 
+// Initialize initializes the orchestrator object with required fields.
+func (o *WrapperOrchestrator) Initialize(
+	autoscalingContext *context.AutoscalingContext,
+	processors *ca_processors.AutoscalingProcessors,
+	clusterStateRegistry *clusterstate.ClusterStateRegistry,
+	taintConfig taints.TaintConfig,
+) {
+	o.scaleUpOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, taintConfig)
+	o.provReqOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, taintConfig)
+}
+
+// ScaleUp runs the scale-up for either regular pods or pods that consume a
+// ProvisioningRequest, alternating between the two groups on subsequent calls
+// so that neither can starve the other.
+func (o *WrapperOrchestrator) ScaleUp(
+	unschedulablePods []*apiv1.Pod,
+	nodes []*apiv1.Node,
+	daemonSets []*appsv1.DaemonSet,
+	nodeInfos map[string]*schedulerframework.NodeInfo,
+) (*status.ScaleUpStatus, errors.AutoscalerError) {
+	provReqPods, regularPods := sortOut(unschedulablePods)
+	if len(provReqPods) == 0 {
+		return o.scaleUpOrchestrator.ScaleUp(regularPods, nodes, daemonSets, nodeInfos)
+	}
+	if len(regularPods) == 0 {
+		return o.provReqOrchestrator.ScaleUp(provReqPods, nodes, daemonSets, nodeInfos)
+	}
+	if o.scaleUpRegularPods {
+		o.scaleUpRegularPods = false
+		return o.scaleUpOrchestrator.ScaleUp(regularPods, nodes, daemonSets, nodeInfos)
+	}
+	o.scaleUpRegularPods = true
+	return o.provReqOrchestrator.ScaleUp(provReqPods, nodes, daemonSets, nodeInfos)
+}
+
+// sortOut splits the unschedulable pods into pods that consume a
+// ProvisioningRequest and regular pods, based on the consumeProvReq annotation.
+func sortOut(unschedulablePods []*apiv1.Pod) (provReqPods, regularPods []*apiv1.Pod) {
+	for _, pod := range unschedulablePods {
+		if _, ok := pod.Annotations[consumeProvReq]; ok {
+			provReqPods = append(provReqPods, pod)
+		} else {
+			regularPods = append(regularPods, pod)
+		}
+	}
+	return
+}
+
+// ScaleUpToNodeGroupMinSize tries to scale up node groups that have fewer
+// nodes than the configured min size. The source of truth for the current
+// node group size is the TargetSize queried directly from cloud providers.
+// Returns an appropriate status or error if an unexpected error occurred.
+func (o *WrapperOrchestrator) ScaleUpToNodeGroupMinSize(
+	nodes []*apiv1.Node,
+	nodeInfos map[string]*schedulerframework.NodeInfo,
+) (*status.ScaleUpStatus, errors.AutoscalerError) {
+	return o.scaleUpOrchestrator.ScaleUpToNodeGroupMinSize(nodes, nodeInfos)
+}
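Review note: to make the routing rule concrete, a pod takes the ProvisioningRequest path purely on the presence of the annotation key; the value is not inspected. A small sketch from within the orchestrator package, with a hypothetical pod:

	pod := &apiv1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "worker-0",
		Annotations: map[string]string{
			"cluster-autoscaler.kubernetes.io/consume-provisioning-request": "my-provreq",
		},
	}}
	provReqPods, regularPods := sortOut([]*apiv1.Pod{pod})
	// provReqPods contains the pod and regularPods is empty; when both kinds
	// are present, successive ScaleUp calls alternate between the two groups.
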
diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index 5d7cf2acf648..d1b048bd96c9 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -29,6 +29,7 @@ import (
 	"time"
 
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
+	"k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator"
 	"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
 	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
@@ -465,6 +466,12 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
 	deleteOptions := options.NewNodeDeleteOptions(autoscalingOptions)
 	drainabilityRules := rules.Default(deleteOptions)
 
+	scaleUpOrchestrator := orchestrator.New()
+	if *provisioningRequestsEnabled {
+		kubeConfig := kube_util.GetKubeConfig(autoscalingOptions.KubeClientOpts)
+		scaleUpOrchestrator = orchestrator.NewWrapperOrchestrator(kubeConfig)
+	}
+
 	opts := core.AutoscalerOptions{
 		AutoscalingOptions: autoscalingOptions,
 		ClusterSnapshot:    clustersnapshot.NewDeltaClusterSnapshot(),
@@ -474,6 +481,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
 		PredicateChecker:   predicateChecker,
 		DeleteOptions:      deleteOptions,
 		DrainabilityRules:  drainabilityRules,
+		ScaleUpOrchestrator: scaleUpOrchestrator,
 	}
 
 	opts.Processors = ca_processors.DefaultProcessors(autoscalingOptions)
diff --git a/cluster-autoscaler/provisioningrequest/bookcapacity/state.go b/cluster-autoscaler/provisioningrequest/bookcapacity/state.go
new file mode 100644
index 000000000000..173b33600370
--- /dev/null
+++ b/cluster-autoscaler/provisioningrequest/bookcapacity/state.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bookcapacity
+
+import (
+	"time"
+
+	apiv1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
+)
+
+// ProvisioningRequestCondition is a type of condition set on a ProvisioningRequest.
+type ProvisioningRequestCondition string
+
+const (
+	BookCapacityCondition = ProvisioningRequestCondition("BookCapacity")
+	ExpiredCondition      = ProvisioningRequestCondition("Expired")
+	PendingCondition      = ProvisioningRequestCondition("Pending") // or maybe have (BookCapacity; False) condition instead of (Pending; True)?
+	RejectedCondition     = ProvisioningRequestCondition("Rejected") // seems useless
+
+	// BookCapacityClass is the provisioning class that reserves capacity without provisioning nodes.
+	BookCapacityClass = "book-capacity.kubernetes.io"
+	// DefaultReservationTime is how long booked capacity is held for a ProvisioningRequest.
+	DefaultReservationTime = 10 * time.Minute
+)
+
+// BookCapacity reports whether capacity should be booked for the given
+// ProvisioningRequest, based on its class and its most recent condition.
+func BookCapacity(pr *provreqwrapper.ProvisioningRequest) bool {
+	if pr.V1Beta1().Spec.ProvisioningClassName != BookCapacityClass {
+		return false
+	}
+	if len(pr.Conditions()) == 0 {
+		return false
+	}
+	// Only the most recent condition is considered.
+	condition := pr.Conditions()[len(pr.Conditions())-1]
+	return condition.Type == string(BookCapacityCondition) && condition.Status == v1.ConditionTrue
+}
+
+// TODO: should this be done by the wrapper orchestrator? The injector will inject
+// pods from one ProvisioningRequest, so this is probably not needed, but some
+// check should remain in case of future code changes.
+// FilterBookCapacityClass filters the list of pods down to those that belong to
+// a single ProvisioningRequest of the book-capacity provisioning class.
+// It is currently a stub that returns the input unchanged.
+func FilterBookCapacityClass(unschedulablePods []*apiv1.Pod) []*apiv1.Pod {
+	return unschedulablePods
+}
+
+// SetCondition appends a condition of the given type, with Status=True, to the
+// ProvisioningRequest.
+func SetCondition(pr *provreqwrapper.ProvisioningRequest, conditionType ProvisioningRequestCondition, reason, message string) {
+	conditions := pr.Conditions()
+	conditions = append(conditions, v1.Condition{
+		Type:   string(conditionType),
+		Status: v1.ConditionTrue,
+		// ObservedGeneration: ?,
+		LastTransitionTime: v1.Now(),
+		Reason:             reason,
+		Message:            message,
+	})
+	pr.SetConditions(conditions)
+}
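Review note: a short sketch of the intended condition lifecycle, assuming `pr` is a wrapped request of the book-capacity class (the variable is hypothetical):

	// Once the BookCapacity condition is set to True, subsequent autoscaler
	// iterations keep injecting the request's pods into the snapshot to hold
	// the booked capacity until it expires.
	bookcapacity.SetCondition(pr, bookcapacity.BookCapacityCondition, "Capacity is found", "")
	if bookcapacity.BookCapacity(pr) {
		// capacity will be reserved for pr's pods in the next loop
	}
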
diff --git a/cluster-autoscaler/provisioningrequest/pods/pods.go b/cluster-autoscaler/provisioningrequest/pods/pods.go
new file mode 100644
index 000000000000..18cd0733d180
--- /dev/null
+++ b/cluster-autoscaler/provisioningrequest/pods/pods.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pods
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/proto"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
+	"k8s.io/kubernetes/pkg/controller"
+)
+
+const (
+	// ProvisioningRequestPodAnnotationKey is the annotation key marking pods that consume a ProvisioningRequest.
+	ProvisioningRequestPodAnnotationKey = "cluster-autoscaler.kubernetes.io/consume-provisioning-request"
+	// ProvisioningClassPodAnnotationKey is the annotation key carrying the name of the provisioning class.
+	ProvisioningClassPodAnnotationKey = "cluster-autoscaler.kubernetes.io/provisioning-class-name"
+)
+
+// PodsForProvisioningRequest returns a list of pods for which the
+// ProvisioningRequest needs to provision resources.
+func PodsForProvisioningRequest(pr *provreqwrapper.ProvisioningRequest) ([]*v1.Pod, error) {
+	if pr == nil {
+		return nil, nil
+	}
+	podSets, err := pr.PodSets()
+	if err != nil {
+		return nil, err
+	}
+	pods := make([]*v1.Pod, 0)
+	for i, podSet := range podSets {
+		for j := 0; j < int(podSet.Count); j++ {
+			pod, err := controller.GetPodFromTemplate(&podSet.PodTemplate, pr.RuntimeObject(), ownerReference(pr))
+			if err != nil {
+				return nil, fmt.Errorf("while creating pod for ProvisioningRequest %s/%s, podSet %d: %w", pr.Namespace(), pr.Name(), i, err)
+			}
+			populatePodFields(pr, pod, i, j)
+			pods = append(pods, pod)
+		}
+	}
+	return pods, nil
+}
+
+// ownerReference injects an owner reference that points to the ProvReq object.
+// This allows CA to group the pods as coming from one controller and simplifies
+// the scale-up simulation logic and the number of log lines emitted.
+func ownerReference(pr *provreqwrapper.ProvisioningRequest) *metav1.OwnerReference {
+	return &metav1.OwnerReference{
+		APIVersion: pr.APIVersion(),
+		Kind:       pr.Kind(),
+		Name:       pr.Name(),
+		UID:        pr.UID(),
+		Controller: proto.Bool(true),
+	}
+}
+
+// populatePodFields gives the generated pod a deterministic name and UID,
+// copies the request's namespace and creation timestamp, and annotates the
+// pod with the request name and provisioning class.
+func populatePodFields(pr *provreqwrapper.ProvisioningRequest, pod *v1.Pod, i, j int) {
+	pod.Name = fmt.Sprintf("%s%d-%d", pod.GenerateName, i, j)
+	pod.Namespace = pr.Namespace()
+	if pod.Annotations == nil {
+		pod.Annotations = make(map[string]string)
+	}
+	pod.Annotations[ProvisioningRequestPodAnnotationKey] = pr.Name()
+	pod.Annotations[ProvisioningClassPodAnnotationKey] = pr.V1Beta1().Spec.ProvisioningClassName
+	pod.UID = types.UID(fmt.Sprintf("%s/%s", pod.Namespace, pod.Name))
+	pod.CreationTimestamp = pr.CreationTimestamp()
+}
\ No newline at end of file
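Review note: for reference, the generated pod names encode the pod-set index and the replica index within that set. A sketch, assuming `pr` wraps a request named `my-request` with a single pod set of count 2:

	generated, err := PodsForProvisioningRequest(pr)
	// err == nil
	// generated[0].Name == "my-request-0-0"
	// generated[1].Name == "my-request-0-1"
	// Each pod carries the consume-provisioning-request annotation set to
	// "my-request", which is what the wrapper orchestrator's sortOut keys on.
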
diff --git a/cluster-autoscaler/provisioningrequest/pods/pods_test.go b/cluster-autoscaler/provisioningrequest/pods/pods_test.go
new file mode 100644
index 000000000000..75ad8cd64682
--- /dev/null
+++ b/cluster-autoscaler/provisioningrequest/pods/pods_test.go
@@ -0,0 +1,269 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pods
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"google.golang.org/protobuf/proto"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
+)
+
+const testProvisioningClassName = "TestProvisioningClass"
+
+func TestPodsForProvisioningRequest(t *testing.T) {
+	testPod := func(name, genName, containerName, containerImage, prName string) *v1.Pod {
+		return &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:         name,
+				GenerateName: genName,
+				Namespace:    "test-namespace",
+				UID:          types.UID(fmt.Sprintf("test-namespace/%s", name)),
+				Annotations: map[string]string{
+					ProvisioningRequestPodAnnotationKey: prName,
+					ProvisioningClassPodAnnotationKey:   testProvisioningClassName,
+				},
+				Labels:     map[string]string{},
+				Finalizers: []string{},
+				OwnerReferences: []metav1.OwnerReference{
+					{
+						Controller: proto.Bool(true),
+						Name:       prName,
+					},
+				},
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Name:  containerName,
+						Image: containerImage,
+					},
+				},
+			},
+		}
+	}
+
+	tests := []struct {
+		desc         string
+		pr           *v1beta1.ProvisioningRequest
+		podTemplates []*v1.PodTemplate
+		want         []*v1.Pod
+		wantErr      bool
+	}{
+		{
+			desc: "simple ProvReq",
+			pr: &v1beta1.ProvisioningRequest{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-pr-name",
+					Namespace: "test-namespace",
+				},
+				Spec: v1beta1.ProvisioningRequestSpec{
+					PodSets: []v1beta1.PodSet{
+						{
+							Count:          1,
+							PodTemplateRef: v1beta1.Reference{Name: "template-1"},
+						},
+					},
+					ProvisioningClassName: testProvisioningClassName,
+				},
+			},
+			podTemplates: []*v1.PodTemplate{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "template-1",
+						Namespace: "test-namespace",
+					},
+					Template: v1.PodTemplateSpec{
+						Spec: v1.PodSpec{
+							Containers: []v1.Container{
+								{
+									Name:  "test-container",
+									Image: "test-image",
+								},
+							},
+						},
+					},
+				},
+			},
+			want: []*v1.Pod{
+				testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"),
+			},
+		},
"template-1", + Namespace: "test-namespace", + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + Image: "test-image", + }, + }, + }, + }, + }, + }, + want: []*v1.Pod{ + testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), + }, + }, + { + desc: "ProvReq with multiple pod sets", + pr: &v1beta1.ProvisioningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pr-name", + Namespace: "test-namespace", + }, + Spec: v1beta1.ProvisioningRequestSpec{ + PodSets: []v1beta1.PodSet{ + { + Count: 2, + PodTemplateRef: v1beta1.Reference{Name: "template-1"}, + }, + { + Count: 3, + PodTemplateRef: v1beta1.Reference{Name: "template-2"}, + }, + }, + ProvisioningClassName: testProvisioningClassName, + }, + }, + podTemplates: []*v1.PodTemplate{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "template-1", + Namespace: "test-namespace", + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + Image: "test-image", + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "template-2", + Namespace: "test-namespace", + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container-2", + Image: "test-image-2", + }, + }, + }, + }, + }, + }, + want: []*v1.Pod{ + testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), + testPod("test-pr-name-0-1", "test-pr-name-", "test-container", "test-image", "test-pr-name"), + testPod("test-pr-name-1-0", "test-pr-name-", "test-container-2", "test-image-2", "test-pr-name"), + testPod("test-pr-name-1-1", "test-pr-name-", "test-container-2", "test-image-2", "test-pr-name"), + testPod("test-pr-name-1-2", "test-pr-name-", "test-container-2", "test-image-2", "test-pr-name"), + }, + }, + } + for _, tc := range tests { + t.Run(tc.desc, func(t *testing.T) { + got, err := PodsForProvisioningRequest(provreqwrapper.NewV1Beta1ProvisioningRequest(tc.pr, tc.podTemplates)) + if (err != nil) != tc.wantErr { + t.Errorf("PodsForProvisioningRequest() error = %v, wantErr %v", err, tc.wantErr) + return + } + if diff := cmp.Diff(got, tc.want); diff != "" { + t.Errorf("unexpected response from PodsForProvisioningRequest(), diff (-want +got): %v", diff) + } + }) + } +} diff --git a/cluster-autoscaler/utils/kubernetes/client.go b/cluster-autoscaler/utils/kubernetes/client.go index a86d452e86e1..1dbe01938bf9 100644 --- a/cluster-autoscaler/utils/kubernetes/client.go +++ b/cluster-autoscaler/utils/kubernetes/client.go @@ -35,11 +35,11 @@ const ( // CreateKubeClient creates kube client based on AutoscalingOptions.KubeClientOptions func CreateKubeClient(opts config.KubeClientOptions) kube_client.Interface { - return kube_client.NewForConfigOrDie(getKubeConfig(opts)) + return kube_client.NewForConfigOrDie(GetKubeConfig(opts)) } -// getKubeConfig returns the rest config from AutoscalingOptions.KubeClientOptions. -func getKubeConfig(opts config.KubeClientOptions) *rest.Config { +// GetKubeConfig returns the rest config from AutoscalingOptions.KubeClientOptions. +func GetKubeConfig(opts config.KubeClientOptions) *rest.Config { var kubeConfig *rest.Config var err error