diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml
index 922758879c9..035a99cf36d 100644
--- a/.github/workflows/pr-e2e.yml
+++ b/.github/workflows/pr-e2e.yml
@@ -22,7 +22,7 @@ jobs:
id: checkUserMember
with:
username: ${{ github.actor }}
- team: 'keda-e2e-test-executors'
+ team: "keda-e2e-test-executors"
GITHUB_TOKEN: ${{ secrets.GH_CHECKING_USER_AUTH }}
- name: Update comment with the execution url
@@ -221,5 +221,5 @@ jobs:
uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4
with:
name: e2e-test-logs
- path: '${{ github.workspace }}/tests/**/*.log'
+ path: "${{ github.workspace }}/**/*.log"
if-no-files-found: ignore
diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml
index 78400244ab7..1b84ae767d4 100644
--- a/.github/workflows/template-main-e2e-test.yml
+++ b/.github/workflows/template-main-e2e-test.yml
@@ -51,5 +51,5 @@ jobs:
if: ${{ always() }}
with:
name: e2e-test-logs
- path: '${{ github.workspace }}/tests/**/*.log'
+ path: "${{ github.workspace }}/**/*.log"
if-no-files-found: ignore
diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml
index 75a8959935b..a01b234e7ac 100644
--- a/.github/workflows/template-smoke-tests.yml
+++ b/.github/workflows/template-smoke-tests.yml
@@ -48,5 +48,5 @@ jobs:
if: ${{ always() }}
with:
name: smoke-test-logs ${{ inputs.runs-on }}-${{ inputs.kubernetesVersion }}
- path: "${{ github.workspace }}/tests/**/*.log"
+ path: "${{ github.workspace }}/**/*.log"
if-no-files-found: ignore
diff --git a/.gitignore b/.gitignore
index 51f73f9d700..9040c59a8ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,3 +45,6 @@ __debug_bin
# GO Test result
report.xml
+
+# KEDA Certs
+certs/*
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80ab13ab073..eabd94fd905 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,7 +51,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
### New
-- **General**: TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))
+- **General**: Introduce new AWS Authentication ([#4134](https://github.com/kedacore/keda/issues/4134))
#### Experimental
@@ -65,7 +65,9 @@ Here is an overview of all new **experimental** features:
- **General**: Add parameter queryParameters to prometheus-scaler ([#4962](https://github.com/kedacore/keda/issues/4962))
- **General**: Add validations for replica counts when creating ScaledObjects ([#5288](https://github.com/kedacore/keda/issues/5288))
- **General**: Bubble up AuthRef TriggerAuthentication errors as ScaledObject events ([#5190](https://github.com/kedacore/keda/issues/5190))
+- **General**: Enhance podIdentity Role Assumption in AWS by Direct Integration with OIDC/Federation ([#5178](https://github.com/kedacore/keda/issues/5178))
- **General**: Fix issue where paused annotation being set to false still leads to scaled objects/jobs being paused ([#5215](https://github.com/kedacore/keda/issues/5215))
+- **General**: Implement Credentials Cache for AWS Roles to reduce AWS API calls ([#5297](https://github.com/kedacore/keda/issues/5297))
- **General**: Support TriggerAuthentication properties from ConfigMap ([#4830](https://github.com/kedacore/keda/issues/4830))
- **General**: Use client-side round-robin load balancing for grpc calls ([#5224](https://github.com/kedacore/keda/issues/5224))
- **GCP pubsub scaler**: Support distribution-valued metrics and metrics from topics ([#5070](https://github.com/kedacore/keda/issues/5070))
diff --git a/CREATE-NEW-SCALER.md b/CREATE-NEW-SCALER.md
index d67d0248a44..3692de3f253 100644
--- a/CREATE-NEW-SCALER.md
+++ b/CREATE-NEW-SCALER.md
@@ -65,18 +65,18 @@ The return type of this function is `MetricSpec`, but in KEDA's case we will mos
- `TargetValue`: is the value of the metric we want to reach at all times at all costs. As long as the current metric doesn't match TargetValue, HPA will increase the number of the pods until it reaches the maximum number of pods allowed to scale to.
- `TargetAverageValue`: the value of the metric for which we require one pod to handle. e.g. if we have a scaler based on the length of a message queue, and we specify 10 for `TargetAverageValue`, we are saying that each pod will handle 10 messages. So if the length of the queue becomes 30, we expect that we have 3 pods in our cluster. (`TargetAverage` and `TargetValue` are mutually exclusive).
-All scalers receive a parameter named `scalerIndex` as part of `ScalerConfig`. This value is the index of the current scaler in a ScaledObject. All metric names have to start with `sX-` (where `X` is `scalerIndex`). This convention makes the metric name unique in the ScaledObject and brings the option to have more than 1 "similar metric name" defined in a ScaledObject.
+All scalers receive a parameter named `triggerIndex` as part of `ScalerConfig`. This value is the index of the current scaler in a ScaledObject. All metric names have to start with `sX-` (where `X` is `triggerIndex`). This convention makes the metric name unique in the ScaledObject and brings the option to have more than 1 "similar metric name" defined in a ScaledObject.
For example:
- s0-redis-mylist
- s1-redis-mylist
->**Note:** There is a naming helper function `GenerateMetricNameWithIndex(scalerIndex int, metricName string)`, that receives the current index and the original metric name (without the prefix) and returns the concatenated string using the convention (please use this function).
+>**Note:** There is a naming helper function `GenerateMetricNameWithIndex(triggerIndex int, metricName string)`, that receives the current index and the original metric name (without the prefix) and returns the concatenated string using the convention (please use this function).
Next lines are an example about how to use it:
>```golang
>func (s *artemisScaler) GetMetricSpecForScaling() []v2.MetricSpec {
> externalMetric := &v2.ExternalMetricSource{
> Metric: v2.MetricIdentifier{
-> Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s-%s", "artemis", s.metadata.brokerName, s.metadata.queueName))),
+> Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s-%s", "artemis", s.metadata.brokerName, s.metadata.queueName))),
> },
> Target: GetMetricTarget(s.metricType, s.metadata.queueLength),
> }
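To make the `sX-` convention concrete, here is a minimal, runnable sketch; the function body is inferred from the documented prefix behavior (`s0-redis-mylist`, `s1-redis-mylist`) rather than copied from the KEDA source:

```go
package main

import "fmt"

// generateMetricNameWithIndex mimics what the renamed helper is documented to
// do: prefix the metric name with "sX-" (X = triggerIndex) so that two
// triggers of the same type in one ScaledObject yield distinct metric names.
// Hypothetical re-implementation for illustration only.
func generateMetricNameWithIndex(triggerIndex int, metricName string) string {
	return fmt.Sprintf("s%d-%s", triggerIndex, metricName)
}

func main() {
	fmt.Println(generateMetricNameWithIndex(0, "redis-mylist")) // s0-redis-mylist
	fmt.Println(generateMetricNameWithIndex(1, "redis-mylist")) // s1-redis-mylist
}
```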
diff --git a/Makefile b/Makefile
index 14f1f749646..2b1f547d772 100644
--- a/Makefile
+++ b/Makefile
@@ -281,7 +281,7 @@ deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/con
fi
if [ "$(AWS_RUN_IDENTITY_TESTS)" = true ]; then \
cd config/service_account && \
- $(KUSTOMIZE) edit add annotation --force eks.amazonaws.com/role-arn:arn:aws:iam::${TF_AWS_ACCOUNT_ID}:role/${TEST_CLUSTER_NAME}-role; \
+ $(KUSTOMIZE) edit add annotation --force eks.amazonaws.com/role-arn:${TF_AWS_KEDA_ROLE}; \
fi
if [ "$(GCP_RUN_IDENTITY_TESTS)" = true ]; then \
cd config/service_account && \
diff --git a/apis/keda/v1alpha1/triggerauthentication_types.go b/apis/keda/v1alpha1/triggerauthentication_types.go
index 9483abedb6b..6973d0a957e 100644
--- a/apis/keda/v1alpha1/triggerauthentication_types.go
+++ b/apis/keda/v1alpha1/triggerauthentication_types.go
@@ -118,9 +118,9 @@ const (
PodIdentityProviderAzure PodIdentityProvider = "azure"
PodIdentityProviderAzureWorkload PodIdentityProvider = "azure-workload"
PodIdentityProviderGCP PodIdentityProvider = "gcp"
- PodIdentityProviderSpiffe PodIdentityProvider = "spiffe"
PodIdentityProviderAwsEKS PodIdentityProvider = "aws-eks"
PodIdentityProviderAwsKiam PodIdentityProvider = "aws-kiam"
+ PodIdentityProviderAws PodIdentityProvider = "aws"
)
// PodIdentityAnnotationEKS specifies aws role arn for aws-eks Identity Provider
@@ -133,9 +133,17 @@ const (
// AuthPodIdentity allows users to select the platform native identity
// mechanism
type AuthPodIdentity struct {
+ // +kubebuilder:validation:Enum=azure;azure-workload;gcp;aws;aws-eks;aws-kiam
Provider PodIdentityProvider `json:"provider"`
// +optional
IdentityID *string `json:"identityId"`
+ // +optional
+ // RoleArn sets the AWS RoleArn to be used. Mutually exclusive with IdentityOwner
+ RoleArn string `json:"roleArn"`
+ // +kubebuilder:validation:Enum=keda;workload
+ // +optional
+ // IdentityOwner configures which identity has to be used during auto discovery, keda or the scaled workload. Mutually exclusive with roleArn
+ IdentityOwner *string `json:"identityOwner"`
}
func (a *AuthPodIdentity) GetIdentityID() string {
@@ -145,6 +153,13 @@ func (a *AuthPodIdentity) GetIdentityID() string {
return *a.IdentityID
}
+func (a *AuthPodIdentity) IsWorkloadIdentityOwner() bool {
+ if a.IdentityOwner == nil {
+ return false
+ }
+ return *a.IdentityOwner == workloadString
+}
+
// AuthConfigMapTargetRef is used to authenticate using a reference to a config map
type AuthConfigMapTargetRef AuthTargetRef
diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook.go b/apis/keda/v1alpha1/triggerauthentication_webhook.go
index 72b14e1b388..df77bbc1e18 100644
--- a/apis/keda/v1alpha1/triggerauthentication_webhook.go
+++ b/apis/keda/v1alpha1/triggerauthentication_webhook.go
@@ -28,6 +28,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+const (
+ kedaString = "keda"
+ workloadString = "workload"
+)
+
var triggerauthenticationlog = logf.Log.WithName("triggerauthentication-validation-webhook")
func (ta *TriggerAuthentication) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -113,6 +118,10 @@ func validateSpec(spec *TriggerAuthenticationSpec) (admission.Warnings, error) {
if spec.PodIdentity.IdentityID != nil && *spec.PodIdentity.IdentityID == "" {
return nil, fmt.Errorf("identityid of PodIdentity should not be empty. If it's set, identityId has to be different than \"\"")
}
+ case PodIdentityProviderAws:
+ if spec.PodIdentity.RoleArn != "" && spec.PodIdentity.IsWorkloadIdentityOwner() {
+ return nil, fmt.Errorf("roleArn of PodIdentity can't be set if KEDA isn't identityOwner")
+ }
default:
return nil, nil
}
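The validation added here encodes one rule for the new `aws` provider: `roleArn` and `identityOwner: workload` are mutually exclusive. A self-contained sketch of that rule, with hypothetical local types mirroring the `AuthPodIdentity` fields (not KEDA exports):

```go
package main

import (
	"errors"
	"fmt"
)

// authPodIdentity mirrors the relevant AuthPodIdentity fields.
type authPodIdentity struct {
	roleArn       string
	identityOwner *string // nil, "keda", or "workload"
}

// validateAwsPodIdentity reproduces the webhook rule above: a fixed roleArn
// only makes sense when KEDA (not the scaled workload) owns the identity.
func validateAwsPodIdentity(p authPodIdentity) error {
	workloadOwner := p.identityOwner != nil && *p.identityOwner == "workload"
	if p.roleArn != "" && workloadOwner {
		return errors.New("roleArn of PodIdentity can't be set if KEDA isn't identityOwner")
	}
	return nil
}

func main() {
	workload := "workload"
	fmt.Println(validateAwsPodIdentity(authPodIdentity{roleArn: "arn:aws:iam::123456789012:role/x"}))                           // <nil>
	fmt.Println(validateAwsPodIdentity(authPodIdentity{roleArn: "arn:aws:iam::123456789012:role/x", identityOwner: &workload})) // error
}
```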
diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook_test.go b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go
index b18585ff97b..44ea8ed762c 100644
--- a/apis/keda/v1alpha1/triggerauthentication_webhook_test.go
+++ b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go
@@ -24,13 +24,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var _ = It("validate triggerauthentication when IdentityID is nil", func() {
+var _ = It("validate triggerauthentication when IdentityID is nil, roleArn is empty and identityOwner is nil", func() {
namespaceName := "nilidentityid"
namespace := createNamespace(namespaceName)
err := k8sClient.Create(context.Background(), namespace)
Expect(err).ToNot(HaveOccurred())
- spec := createTriggerAuthenticationSpecWithPodIdentity(nil)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", nil, nil)
ta := createTriggerAuthentication("nilidentityidta", namespaceName, "TriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
@@ -44,7 +44,7 @@ var _ = It("validate triggerauthentication when IdentityID is empty", func() {
Expect(err).ToNot(HaveOccurred())
identityID := ""
- spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil)
ta := createTriggerAuthentication("emptyidentityidta", namespaceName, "TriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
@@ -58,7 +58,76 @@ var _ = It("validate triggerauthentication when IdentityID is not empty", func()
Expect(err).ToNot(HaveOccurred())
identityID := "12345"
- spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is nil", func() {
+ namespaceName := "rolearn"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, nil)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is keda", func() {
+ namespaceName := "rolearnandkedaowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := kedaString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() {
+ namespaceName := "rolearnandworkloadowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := workloadString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when RoleArn is empty and IdentityOwner is keda", func() {
+ namespaceName := "kedaowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := kedaString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() {
+ namespaceName := "workloadowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := workloadString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner)
ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
@@ -71,7 +140,7 @@ var _ = It("validate clustertriggerauthentication when IdentityID is nil", func(
err := k8sClient.Create(context.Background(), namespace)
Expect(err).ToNot(HaveOccurred())
- spec := createTriggerAuthenticationSpecWithPodIdentity(nil)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", nil, nil)
ta := createTriggerAuthentication("clusternilidentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
@@ -85,7 +154,7 @@ var _ = It("validate clustertriggerauthentication when IdentityID is empty", fun
Expect(err).ToNot(HaveOccurred())
identityID := ""
- spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil)
ta := createTriggerAuthentication("clusteremptyidentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
@@ -99,18 +168,89 @@ var _ = It("validate clustertriggerauthentication when IdentityID is not empty",
Expect(err).ToNot(HaveOccurred())
identityID := "12345"
- spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil)
ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
Eventually(func() error {
return k8sClient.Create(context.Background(), ta)
}).ShouldNot(HaveOccurred())
})
-func createTriggerAuthenticationSpecWithPodIdentity(identityID *string) TriggerAuthenticationSpec {
+var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is nil", func() {
+ namespaceName := "clusterrolearn"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, nil)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is keda", func() {
+ namespaceName := "clusterrolearnandkedaowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := kedaString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() {
+ namespaceName := "clusterrolearnandworkloadowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := workloadString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when RoleArn is empty and IdentityOwner is keda", func() {
+ namespaceName := "clusterandkedaowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := kedaString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() {
+ namespaceName := "clusterandworkloadowner"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityOwner := workloadString
+ spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+func createTriggerAuthenticationSpecWithPodIdentity(provider PodIdentityProvider, roleArn string, identityID, identityOwner *string) TriggerAuthenticationSpec {
return TriggerAuthenticationSpec{
PodIdentity: &AuthPodIdentity{
- Provider: PodIdentityProviderAzure,
- IdentityID: identityID,
+ Provider: provider,
+ IdentityID: identityID,
+ RoleArn: roleArn,
+ IdentityOwner: identityOwner,
},
}
}
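The Ginkgo specs above enumerate the allowed and rejected (`roleArn`, `identityOwner`) combinations; the same matrix reads at a glance as a table-driven test. This sketch reuses the hypothetical `validateAwsPodIdentity` helper from the earlier sketch:

```go
package main

import "testing"

// Table-driven summary of the combinations the specs above exercise.
func TestAwsPodIdentityCombinations(t *testing.T) {
	keda, workload := "keda", "workload"
	cases := []struct {
		name          string
		roleArn       string
		identityOwner *string
		wantErr       bool
	}{
		{"roleArn only", "arn:aws:iam::123456789012:role/x", nil, false},
		{"roleArn with keda owner", "arn:aws:iam::123456789012:role/x", &keda, false},
		{"roleArn with workload owner", "arn:aws:iam::123456789012:role/x", &workload, true}, // mutually exclusive
		{"keda owner only", "", &keda, false},
		{"workload owner only", "", &workload, false},
	}
	for _, c := range cases {
		err := validateAwsPodIdentity(authPodIdentity{roleArn: c.roleArn, identityOwner: c.identityOwner})
		if (err != nil) != c.wantErr {
			t.Errorf("%s: got err=%v, wantErr=%v", c.name, err, c.wantErr)
		}
	}
}
```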
diff --git a/apis/keda/v1alpha1/zz_generated.deepcopy.go b/apis/keda/v1alpha1/zz_generated.deepcopy.go
index e8525e3dbf6..c9162874ac8 100755
--- a/apis/keda/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/keda/v1alpha1/zz_generated.deepcopy.go
@@ -85,6 +85,11 @@ func (in *AuthPodIdentity) DeepCopyInto(out *AuthPodIdentity) {
*out = new(string)
**out = **in
}
+ if in.IdentityOwner != nil {
+ in, out := &in.IdentityOwner, &out.IdentityOwner
+ *out = new(string)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthPodIdentity.
diff --git a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml
index cc9cacc688f..1885fcd69f9 100644
--- a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml
+++ b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml
@@ -111,8 +111,27 @@ spec:
properties:
identityId:
type: string
+ identityOwner:
+ description: IdentityOwner configures which identity has to
+ be used during auto discovery, keda or the scaled workload.
+ Mutually exclusive with roleArn
+ enum:
+ - keda
+ - workload
+ type: string
provider:
description: PodIdentityProvider contains the list of providers
+ enum:
+ - azure
+ - azure-workload
+ - gcp
+ - aws
+ - aws-eks
+ - aws-kiam
+ type: string
+ roleArn:
+ description: RoleArn sets the AWS RoleArn to be used. Mutually
+ exclusive with IdentityOwner
type: string
required:
- provider
@@ -243,8 +262,27 @@ spec:
properties:
identityId:
type: string
+ identityOwner:
+ description: IdentityOwner configures which identity has to be
+ used during auto discovery, keda or the scaled workload. Mutually
+ exclusive with roleArn
+ enum:
+ - keda
+ - workload
+ type: string
provider:
description: PodIdentityProvider contains the list of providers
+ enum:
+ - azure
+ - azure-workload
+ - gcp
+ - aws
+ - aws-eks
+ - aws-kiam
+ type: string
+ roleArn:
+ description: RoleArn sets the AWS RoleArn to be used. Mutually
+ exclusive with IdentityOwner
type: string
required:
- provider
diff --git a/config/crd/bases/keda.sh_triggerauthentications.yaml b/config/crd/bases/keda.sh_triggerauthentications.yaml
index 6589a44301b..c731c78d1c8 100644
--- a/config/crd/bases/keda.sh_triggerauthentications.yaml
+++ b/config/crd/bases/keda.sh_triggerauthentications.yaml
@@ -110,8 +110,27 @@ spec:
properties:
identityId:
type: string
+ identityOwner:
+ description: IdentityOwner configures which identity has to
+ be used during auto discovery, keda or the scaled workload.
+ Mutually exclusive with roleArn
+ enum:
+ - keda
+ - workload
+ type: string
provider:
description: PodIdentityProvider contains the list of providers
+ enum:
+ - azure
+ - azure-workload
+ - gcp
+ - aws
+ - aws-eks
+ - aws-kiam
+ type: string
+ roleArn:
+ description: RoleArn sets the AWS RoleArn to be used. Mutually
+ exclusive with IdentityOwner
type: string
required:
- provider
@@ -242,8 +261,27 @@ spec:
properties:
identityId:
type: string
+ identityOwner:
+ description: IdentityOwner configures which identity has to be
+ used during auto discovery, keda or the scaled workload. Mutually
+ exclusive with roleArn
+ enum:
+ - keda
+ - workload
+ type: string
provider:
description: PodIdentityProvider contains the list of providers
+ enum:
+ - azure
+ - azure-workload
+ - gcp
+ - aws
+ - aws-eks
+ - aws-kiam
+ type: string
+ roleArn:
+ description: RoleArn sets the AWS RoleArn to be used. Mutually
+ exclusive with IdentityOwner
type: string
required:
- provider
diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go
index fd6e3cfe98e..a404831d59c 100644
--- a/controllers/keda/scaledobject_controller_test.go
+++ b/controllers/keda/scaledobject_controller_test.go
@@ -96,7 +96,7 @@ var _ = Describe("ScaledObjectController", func() {
TriggerMetadata: tm,
ResolvedEnv: nil,
AuthParams: nil,
- ScalerIndex: i,
+ TriggerIndex: i,
}
s, err := scalers.NewPrometheusScaler(config)
diff --git a/go.mod b/go.mod
index 766418a6244..30c3819977c 100644
--- a/go.mod
+++ b/go.mod
@@ -327,7 +327,7 @@ require (
go.uber.org/automaxprocs v1.5.3
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
- golang.org/x/crypto v0.17.0 // indirect
+ golang.org/x/crypto v0.17.0
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
index 81b42562136..2b2464a3ce0 100644
--- a/hack/boilerplate.go.txt
+++ b/hack/boilerplate.go.txt
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go
index 2c41cc612a6..243af317a65 100644
--- a/pkg/generated/clientset/versioned/clientset.go
+++ b/pkg/generated/clientset/versioned/clientset.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go
index bfa8e619a58..15942a25c4d 100644
--- a/pkg/generated/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go
index 05677d419b9..9888423c919 100644
--- a/pkg/generated/clientset/versioned/fake/doc.go
+++ b/pkg/generated/clientset/versioned/fake/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go
index 69db8f6c17d..de936814783 100644
--- a/pkg/generated/clientset/versioned/fake/register.go
+++ b/pkg/generated/clientset/versioned/fake/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go
index 9472c556e15..44c183eb99f 100644
--- a/pkg/generated/clientset/versioned/scheme/doc.go
+++ b/pkg/generated/clientset/versioned/scheme/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go
index 8a192967ff6..dab287f31f7 100644
--- a/pkg/generated/clientset/versioned/scheme/register.go
+++ b/pkg/generated/clientset/versioned/scheme/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/clustertriggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/clustertriggerauthentication.go
index 3773d69e357..cf8b8e0c154 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/clustertriggerauthentication.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/clustertriggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go
index 6fd099df002..406b9795497 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go
index 41dbc0f3c40..f97ddfdad73 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_clustertriggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_clustertriggerauthentication.go
index 7e1d056d622..3b6c3c990bc 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_clustertriggerauthentication.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_clustertriggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go
index 3a04f230033..82d5036431e 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go
index 9315aa58621..7e82da5c044 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
index 4ac8b40e5bb..2f77b682ca8 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go
index a272dcc5912..575a6dc1f89 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go
index 4bf70708dad..648d26def21 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go
index b765a8eae7c..f4bbe295401 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go
index c5493ad4db6..e7eaa967521 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go
index eac695b9ea3..9dcb8ded4b0 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go
index 5fe86261af6..966e07a6c07 100644
--- a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go
+++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go
index 85f4eb8847a..38c2b5da8e6 100644
--- a/pkg/generated/informers/externalversions/factory.go
+++ b/pkg/generated/informers/externalversions/factory.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go
index 69ffbf1f4c3..4877b90eeec 100644
--- a/pkg/generated/informers/externalversions/generic.go
+++ b/pkg/generated/informers/externalversions/generic.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go
index 41f55602c7d..db18f8a994a 100644
--- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/interface.go b/pkg/generated/informers/externalversions/keda/interface.go
index a256b990e9e..8a150503dab 100644
--- a/pkg/generated/informers/externalversions/keda/interface.go
+++ b/pkg/generated/informers/externalversions/keda/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/clustertriggerauthentication.go b/pkg/generated/informers/externalversions/keda/v1alpha1/clustertriggerauthentication.go
index a98fea79eee..cb6a12601ad 100644
--- a/pkg/generated/informers/externalversions/keda/v1alpha1/clustertriggerauthentication.go
+++ b/pkg/generated/informers/externalversions/keda/v1alpha1/clustertriggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go b/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go
index 3b8cbce7f00..6f17f7501ff 100644
--- a/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go
+++ b/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go
index 92d1e92db02..4d6c0d569c2 100644
--- a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go
+++ b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go
index ff8ac1ccbb6..543203d608c 100644
--- a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go
+++ b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go b/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go
index fdc79159c0c..aa43f499afb 100644
--- a/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go
+++ b/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/listers/keda/v1alpha1/clustertriggerauthentication.go b/pkg/generated/listers/keda/v1alpha1/clustertriggerauthentication.go
index b98825e2444..da3f39d91ca 100644
--- a/pkg/generated/listers/keda/v1alpha1/clustertriggerauthentication.go
+++ b/pkg/generated/listers/keda/v1alpha1/clustertriggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/listers/keda/v1alpha1/expansion_generated.go b/pkg/generated/listers/keda/v1alpha1/expansion_generated.go
index 6dee045ea17..4916b84ca76 100644
--- a/pkg/generated/listers/keda/v1alpha1/expansion_generated.go
+++ b/pkg/generated/listers/keda/v1alpha1/expansion_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/listers/keda/v1alpha1/scaledjob.go b/pkg/generated/listers/keda/v1alpha1/scaledjob.go
index def46b3ea21..10bd2409657 100644
--- a/pkg/generated/listers/keda/v1alpha1/scaledjob.go
+++ b/pkg/generated/listers/keda/v1alpha1/scaledjob.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/listers/keda/v1alpha1/scaledobject.go b/pkg/generated/listers/keda/v1alpha1/scaledobject.go
index 91a9e93016a..f55d9c37b21 100644
--- a/pkg/generated/listers/keda/v1alpha1/scaledobject.go
+++ b/pkg/generated/listers/keda/v1alpha1/scaledobject.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go b/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go
index 51638c94289..07c1a6edde1 100644
--- a/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go
+++ b/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go
@@ -1,5 +1,5 @@
/*
-Copyright 2023 The KEDA Authors
+Copyright 2024 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/metricscollector/metricscollectors.go b/pkg/metricscollector/metricscollectors.go
index 8b70b24ce57..299b287a819 100644
--- a/pkg/metricscollector/metricscollectors.go
+++ b/pkg/metricscollector/metricscollectors.go
@@ -31,22 +31,22 @@ var (
)
type MetricsCollector interface {
- RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64)
+ RecordScalerMetric(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64)
// RecordScalerLatency create a measurement of the latency to external metric
- RecordScalerLatency(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64)
+ RecordScalerLatency(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64)
// RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
RecordScalableObjectLatency(namespace string, name string, isScaledObject bool, value float64)
// RecordScalerActive create a measurement of the activity of the scaler
- RecordScalerActive(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, active bool)
+ RecordScalerActive(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, active bool)
// RecordScaledObjectPaused marks whether the current ScaledObject is paused.
RecordScaledObjectPaused(namespace string, scaledObject string, active bool)
// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA
- RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error)
+ RecordScalerError(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, err error)
// RecordScaledObjectError counts the number of errors with the scaled object
RecordScaledObjectError(namespace string, scaledObject string, err error)
@@ -82,16 +82,16 @@ func NewMetricsCollectors(enablePrometheusMetrics bool, enableOpenTelemetryMetri
}
// RecordScalerMetric create a measurement of the external metric used by the HPA
-func RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
+func RecordScalerMetric(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
for _, element := range collectors {
- element.RecordScalerMetric(namespace, scaledObject, scaler, scalerIndex, metric, value)
+ element.RecordScalerMetric(namespace, scaledObject, scaler, triggerIndex, metric, value)
}
}
// RecordScalerLatency create a measurement of the latency to external metric
-func RecordScalerLatency(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
+func RecordScalerLatency(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
for _, element := range collectors {
- element.RecordScalerLatency(namespace, scaledObject, scaler, scalerIndex, metric, value)
+ element.RecordScalerLatency(namespace, scaledObject, scaler, triggerIndex, metric, value)
}
}
@@ -103,9 +103,9 @@ func RecordScalableObjectLatency(namespace string, name string, isScaledObject b
}
// RecordScalerActive create a measurement of the activity of the scaler
-func RecordScalerActive(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, active bool) {
+func RecordScalerActive(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, active bool) {
for _, element := range collectors {
- element.RecordScalerActive(namespace, scaledObject, scaler, scalerIndex, metric, active)
+ element.RecordScalerActive(namespace, scaledObject, scaler, triggerIndex, metric, active)
}
}
@@ -117,9 +117,9 @@ func RecordScaledObjectPaused(namespace string, scaledObject string, active bool
}
// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA
-func RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) {
+func RecordScalerError(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, err error) {
for _, element := range collectors {
- element.RecordScalerError(namespace, scaledObject, scaler, scalerIndex, metric, err)
+ element.RecordScalerError(namespace, scaledObject, scaler, triggerIndex, metric, err)
}
}
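Because Go parameter names are not part of a method's type identity, the `scalerIndex` to `triggerIndex` rename leaves the `MetricsCollector` interface source-compatible for implementers; only the emitted label/attribute names change (see the Prometheus and OpenTelemetry files below). A reduced, hypothetical one-method version of the interface, just to show the renamed parameter in context:

```go
package main

import "fmt"

// metricsCollector is a one-method stand-in for the real interface.
type metricsCollector interface {
	RecordScalerMetric(namespace, scaledObject, scaler string, triggerIndex int, metric string, value float64)
}

type stdoutCollector struct{}

func (stdoutCollector) RecordScalerMetric(namespace, scaledObject, scaler string, triggerIndex int, metric string, value float64) {
	fmt.Printf("ns=%s so=%s scaler=%s triggerIndex=%d metric=%s value=%v\n",
		namespace, scaledObject, scaler, triggerIndex, metric, value)
}

func main() {
	var c metricsCollector = stdoutCollector{}
	c.RecordScalerMetric("default", "my-scaledobject", "prometheusScaler", 0, "s0-prometheus", 42)
}
```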
diff --git a/pkg/metricscollector/opentelemetry.go b/pkg/metricscollector/opentelemetry.go
index 1bc9d0c3a5f..0c4ff961634 100644
--- a/pkg/metricscollector/opentelemetry.go
+++ b/pkg/metricscollector/opentelemetry.go
@@ -188,9 +188,9 @@ func ScalerMetricValueCallback(_ context.Context, obsrv api.Float64Observer) err
return nil
}
-func (o *OtelMetrics) RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
+func (o *OtelMetrics) RecordScalerMetric(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
otelScalerMetricVal.val = value
- otelScalerMetricVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, scalerIndex, metric)
+ otelScalerMetricVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, triggerIndex, metric)
}
func ScalerMetricsLatencyCallback(_ context.Context, obsrv api.Float64Observer) error {
@@ -202,9 +202,9 @@ func ScalerMetricsLatencyCallback(_ context.Context, obsrv api.Float64Observer)
}
// RecordScalerLatency create a measurement of the latency to external metric
-func (o *OtelMetrics) RecordScalerLatency(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
+func (o *OtelMetrics) RecordScalerLatency(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
otelScalerMetricsLatencyVal.val = value
- otelScalerMetricsLatencyVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, scalerIndex, metric)
+ otelScalerMetricsLatencyVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, triggerIndex, metric)
}
func ScalableObjectLatencyCallback(_ context.Context, obsrv api.Float64Observer) error {
@@ -240,14 +240,14 @@ func ScalerActiveCallback(_ context.Context, obsrv api.Float64Observer) error {
}
// RecordScalerActive create a measurement of the activity of the scaler
-func (o *OtelMetrics) RecordScalerActive(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, active bool) {
+func (o *OtelMetrics) RecordScalerActive(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, active bool) {
activeVal := -1
if active {
activeVal = 1
}
otelScalerActiveVal.val = float64(activeVal)
- otelScalerActiveVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, scalerIndex, metric)
+ otelScalerActiveVal.measurementOption = getScalerMeasurementOption(namespace, scaledObject, scaler, triggerIndex, metric)
}
// RecordScaledObjectPaused marks whether the current ScaledObject is paused.
@@ -277,9 +277,9 @@ func (o *OtelMetrics) RecordScaledObjectPaused(namespace string, scaledObject st
}
// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA
-func (o *OtelMetrics) RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) {
+func (o *OtelMetrics) RecordScalerError(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, err error) {
if err != nil {
- otScalerErrorsCounter.Add(context.Background(), 1, getScalerMeasurementOption(namespace, scaledObject, scaler, scalerIndex, metric))
+ otScalerErrorsCounter.Add(context.Background(), 1, getScalerMeasurementOption(namespace, scaledObject, scaler, triggerIndex, metric))
o.RecordScaledObjectError(namespace, scaledObject, err)
return
}
@@ -332,12 +332,12 @@ func (o *OtelMetrics) DecrementCRDTotal(crdType, namespace string) {
otCrdTotalsCounter.Add(context.Background(), -1, opt)
}
-func getScalerMeasurementOption(namespace string, scaledObject string, scaler string, scalerIndex int, metric string) api.MeasurementOption {
+func getScalerMeasurementOption(namespace string, scaledObject string, scaler string, triggerIndex int, metric string) api.MeasurementOption {
return api.WithAttributes(
attribute.Key("namespace").String(namespace),
attribute.Key("scaledObject").String(scaledObject),
attribute.Key("scaler").String(scaler),
- attribute.Key("scalerIndex").String(strconv.Itoa(scalerIndex)),
+ attribute.Key("triggerIndex").String(strconv.Itoa(triggerIndex)),
attribute.Key("metric").String(metric),
)
}
diff --git a/pkg/metricscollector/prommetrics.go b/pkg/metricscollector/prommetrics.go
index 9460bbe0505..825bceed9a7 100644
--- a/pkg/metricscollector/prommetrics.go
+++ b/pkg/metricscollector/prommetrics.go
@@ -30,7 +30,7 @@ import (
var log = logf.Log.WithName("prometheus_server")
var (
- metricLabels = []string{"namespace", "metric", "scaledObject", "scaler", "scalerIndex"}
+ metricLabels = []string{"namespace", "metric", "scaledObject", "scaler", "triggerIndex"}
buildInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: DefaultPromMetricsNamespace,
@@ -183,13 +183,13 @@ func RecordBuildInfo() {
}
// RecordScalerMetric create a measurement of the external metric used by the HPA
-func (p *PromMetrics) RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
- scalerMetricsValue.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Set(value)
+func (p *PromMetrics) RecordScalerMetric(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
+ scalerMetricsValue.With(getLabels(namespace, scaledObject, scaler, triggerIndex, metric)).Set(value)
}
// RecordScalerLatency create a measurement of the latency to external metric
-func (p *PromMetrics) RecordScalerLatency(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) {
- scalerMetricsLatency.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Set(value)
+func (p *PromMetrics) RecordScalerLatency(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, value float64) {
+ scalerMetricsLatency.With(getLabels(namespace, scaledObject, scaler, triggerIndex, metric)).Set(value)
}
// RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
@@ -202,13 +202,13 @@ func (p *PromMetrics) RecordScalableObjectLatency(namespace string, name string,
}
// RecordScalerActive create a measurement of the activity of the scaler
-func (p *PromMetrics) RecordScalerActive(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, active bool) {
+func (p *PromMetrics) RecordScalerActive(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, active bool) {
activeVal := 0
if active {
activeVal = 1
}
- scalerActive.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Set(float64(activeVal))
+ scalerActive.With(getLabels(namespace, scaledObject, scaler, triggerIndex, metric)).Set(float64(activeVal))
}
// RecordScaledObjectPaused marks whether the current ScaledObject is paused.
@@ -224,15 +224,15 @@ func (p *PromMetrics) RecordScaledObjectPaused(namespace string, scaledObject st
}
// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA
-func (p *PromMetrics) RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) {
+func (p *PromMetrics) RecordScalerError(namespace string, scaledObject string, scaler string, triggerIndex int, metric string, err error) {
if err != nil {
- scalerErrors.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Inc()
+ scalerErrors.With(getLabels(namespace, scaledObject, scaler, triggerIndex, metric)).Inc()
p.RecordScaledObjectError(namespace, scaledObject, err)
scalerErrorsTotal.With(prometheus.Labels{}).Inc()
return
}
// initialize metric with 0 if not already set
- _, errscaler := scalerErrors.GetMetricWith(getLabels(namespace, scaledObject, scaler, scalerIndex, metric))
+ _, errscaler := scalerErrors.GetMetricWith(getLabels(namespace, scaledObject, scaler, triggerIndex, metric))
if errscaler != nil {
log.Error(errscaler, "Unable to write to metrics to Prometheus Server: %v")
}
@@ -253,8 +253,8 @@ func (p *PromMetrics) RecordScaledObjectError(namespace string, scaledObject str
}
}
-func getLabels(namespace string, scaledObject string, scaler string, scalerIndex int, metric string) prometheus.Labels {
- return prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject, "scaler": scaler, "scalerIndex": strconv.Itoa(scalerIndex), "metric": metric}
+func getLabels(namespace string, scaledObject string, scaler string, triggerIndex int, metric string) prometheus.Labels {
+ return prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject, "scaler": scaler, "triggerIndex": strconv.Itoa(triggerIndex), "metric": metric}
}
func (p *PromMetrics) IncrementTriggerTotal(triggerType string) {
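
Because this rename changes the exported Prometheus label, here is a minimal sketch (not part of the patch; all values are hypothetical) of the label set now emitted — any dashboard or alert that matched on the old "scalerIndex" label must switch to "triggerIndex":

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Label set as produced by getLabels after the rename; the key
	// "triggerIndex" replaces the previous "scalerIndex".
	labels := prometheus.Labels{
		"namespace":    "default",           // hypothetical
		"scaledObject": "my-scaled-object",  // hypothetical
		"scaler":       "prometheusScaler",  // hypothetical
		"triggerIndex": "0",                 // previously exposed as "scalerIndex"
		"metric":       "s0-prometheus",     // hypothetical
	}
	fmt.Println(labels)
}
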
diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go
index 95e3100a780..e2a0aceed40 100644
--- a/pkg/metricsservice/api/metrics.pb.go
+++ b/pkg/metricsservice/api/metrics.pb.go
@@ -15,8 +15,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.23.2
+// protoc-gen-go v1.31.0
+// protoc v4.23.4
// source: metrics.proto
package api
diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go
index 9eae639dc04..8836b080af0 100644
--- a/pkg/metricsservice/api/metrics_grpc.pb.go
+++ b/pkg/metricsservice/api/metrics_grpc.pb.go
@@ -16,7 +16,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.23.2
+// - protoc v4.23.4
// source: metrics.proto
package api
diff --git a/pkg/scalers/activemq_scaler.go b/pkg/scalers/activemq_scaler.go
index 8b39ba74f6e..8aa4bcfb9dd 100644
--- a/pkg/scalers/activemq_scaler.go
+++ b/pkg/scalers/activemq_scaler.go
@@ -37,7 +37,7 @@ type activeMQMetadata struct {
activationTargetQueueSize int64
corsHeader string
metricName string
- scalerIndex int
+ triggerIndex int
}
type activeMQMonitoring struct {
@@ -159,9 +159,9 @@ func parseActiveMQMetadata(config *ScalerConfig) (*activeMQMetadata, error) {
return nil, fmt.Errorf("password cannot be empty")
}
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("activemq-%s", meta.destinationName)))
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("activemq-%s", meta.destinationName)))
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
diff --git a/pkg/scalers/activemq_scaler_test.go b/pkg/scalers/activemq_scaler_test.go
index 44107b84e80..90fa572b8fd 100644
--- a/pkg/scalers/activemq_scaler_test.go
+++ b/pkg/scalers/activemq_scaler_test.go
@@ -20,7 +20,7 @@ type parseActiveMQMetadataTestData struct {
type activeMQMetricIdentifier struct {
metadataTestData *parseActiveMQMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -296,7 +296,7 @@ func TestParseDefaultTargetQueueSize(t *testing.T) {
func TestActiveMQGetMetricSpecForScaling(t *testing.T) {
for _, testData := range activeMQMetricIdentifiers {
ctx := context.Background()
- metadata, err := parseActiveMQMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+ metadata, err := parseActiveMQMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -343,7 +343,7 @@ func TestActiveMQGetMonitoringEndpoint(t *testing.T) {
"password": "pass123",
}
for _, testData := range getMonitoringEndpointData {
- metadata, err := parseActiveMQMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: authParams, ScalerIndex: 0})
+ metadata, err := parseActiveMQMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: authParams, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/apache_kafka_scaler.go b/pkg/scalers/apache_kafka_scaler.go
index 5d70d2ee660..97dd49c6552 100644
--- a/pkg/scalers/apache_kafka_scaler.go
+++ b/pkg/scalers/apache_kafka_scaler.go
@@ -35,6 +35,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
@@ -70,7 +71,7 @@ type apacheKafkaMetadata struct {
// MSK
awsRegion string
awsEndpoint string
- awsAuthorization awsAuthorizationMetadata
+ awsAuthorization awsutils.AuthorizationMetadata
// TLS
enableTLS bool
@@ -79,7 +80,7 @@ type apacheKafkaMetadata struct {
keyPassword string
ca string
- scalerIndex int
+ triggerIndex int
}
const (
@@ -196,7 +197,7 @@ func parseApacheKafkaAuthParams(config *ScalerConfig, meta *apacheKafkaMetadata)
} else {
return errors.New("no awsRegion given")
}
- auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return err
}
@@ -356,7 +357,7 @@ func parseApacheKafkaMetadata(config *ScalerConfig, logger logr.Logger) (apacheK
}
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -394,7 +395,7 @@ func getApacheKafkaClient(ctx context.Context, metadata apacheKafkaMetadata, log
case KafkaSASLTypeOAuthbearer:
return nil, errors.New("SASL/OAUTHBEARER is not implemented yet")
case KafkaSASLTypeMskIam:
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, err
}
@@ -581,7 +582,7 @@ func (s *apacheKafkaScaler) GetMetricSpecForScaling(context.Context) []v2.Metric
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(metricName)),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
diff --git a/pkg/scalers/apache_kafka_scaler_test.go b/pkg/scalers/apache_kafka_scaler_test.go
index c5ab714dc1b..ed09849496e 100644
--- a/pkg/scalers/apache_kafka_scaler_test.go
+++ b/pkg/scalers/apache_kafka_scaler_test.go
@@ -39,7 +39,7 @@ type parseApacheKafkaAuthParamsTestDataSecondAuthMethod struct {
type apacheKafkaMetricIdentifier struct {
metadataTestData *parseApacheKafkaMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -368,7 +368,7 @@ func TestApacheKafkaAuthParams(t *testing.T) {
func TestApacheKafkaGetMetricSpecForScaling(t *testing.T) {
for _, testData := range apacheKafkaMetricIdentifiers {
- meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validApacheKafkaWithAuthParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validApacheKafkaWithAuthParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/arangodb_scaler.go b/pkg/scalers/arangodb_scaler.go
index a9dd1f558fa..4a19bf77419 100644
--- a/pkg/scalers/arangodb_scaler.go
+++ b/pkg/scalers/arangodb_scaler.go
@@ -64,7 +64,7 @@ type arangoDBMetadata struct {
// The index of the scaler inside the ScaledObject
// +internal
- scalerIndex int
+ triggerIndex int
}
// NewArangoDBScaler creates a new arangodbScaler
@@ -201,7 +201,7 @@ func parseArangoDBMetadata(config *ScalerConfig) (*arangoDBMetadata, error) {
}
meta.arangoDBAuth = arangoDBAuth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -271,7 +271,7 @@ func (s *arangoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName s
func (s *arangoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "arangodb"),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "arangodb"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.queryValue),
}
diff --git a/pkg/scalers/arangodb_scaler_test.go b/pkg/scalers/arangodb_scaler_test.go
index 3b4057b2f23..70081eade20 100644
--- a/pkg/scalers/arangodb_scaler_test.go
+++ b/pkg/scalers/arangodb_scaler_test.go
@@ -68,13 +68,13 @@ var testArangoDBAuthMetadata = []arangoDBAuthMetadataTestData{
type arangoDBMetricIdentifier struct {
metadataTestData *parseArangoDBMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
var arangoDBMetricIdentifiers = []arangoDBMetricIdentifier{
- {metadataTestData: &testArangoDBMetadata[2], scalerIndex: 0, name: "s0-arangodb"},
- {metadataTestData: &testArangoDBMetadata[2], scalerIndex: 1, name: "s1-arangodb"},
+ {metadataTestData: &testArangoDBMetadata[2], triggerIndex: 0, name: "s0-arangodb"},
+ {metadataTestData: &testArangoDBMetadata[2], triggerIndex: 1, name: "s1-arangodb"},
}
func TestParseArangoDBMetadata(t *testing.T) {
@@ -113,7 +113,7 @@ func TestArangoDBGetMetricSpecForScaling(t *testing.T) {
meta, err := parseArangoDBMetadata(&ScalerConfig{
AuthParams: testData.metadataTestData.authParams,
TriggerMetadata: testData.metadataTestData.metadata,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
})
if err != nil {
t.Fatal("Could not parse metadata:", err)
diff --git a/pkg/scalers/artemis_scaler.go b/pkg/scalers/artemis_scaler.go
index 0b480502f93..402e8012525 100644
--- a/pkg/scalers/artemis_scaler.go
+++ b/pkg/scalers/artemis_scaler.go
@@ -36,7 +36,7 @@ type artemisMetadata struct {
queueLength int64
activationQueueLength int64
corsHeader string
- scalerIndex int
+ triggerIndex int
}
//revive:enable:var-naming
@@ -171,7 +171,7 @@ func parseArtemisMetadata(config *ScalerConfig) (*artemisMetadata, error) {
return nil, fmt.Errorf("password cannot be empty")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -257,7 +257,7 @@ func (s *artemisScaler) getQueueMessageCount(ctx context.Context) (int64, error)
func (s *artemisScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("artemis-%s", s.metadata.queueName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("artemis-%s", s.metadata.queueName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queueLength),
}
diff --git a/pkg/scalers/artemis_scaler_test.go b/pkg/scalers/artemis_scaler_test.go
index 5bf2d8ed7ff..ea2bfe2b841 100644
--- a/pkg/scalers/artemis_scaler_test.go
+++ b/pkg/scalers/artemis_scaler_test.go
@@ -18,7 +18,7 @@ type parseArtemisMetadataTestData struct {
type artemisMetricIdentifier struct {
metadataTestData *parseArtemisMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -145,7 +145,7 @@ func TestArtemisParseMetadata(t *testing.T) {
func TestArtemisGetMetricSpecForScaling(t *testing.T) {
for _, testData := range artemisMetricIdentifiers {
ctx := context.Background()
- meta, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
+ meta, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/aws/aws_authorization.go b/pkg/scalers/aws/aws_authorization.go
new file mode 100644
index 00000000000..fd49c2f8995
--- /dev/null
+++ b/pkg/scalers/aws/aws_authorization.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2024 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws
+
+type AuthorizationMetadata struct {
+ AwsRoleArn string
+
+ AwsAccessKeyID string
+ AwsSecretAccessKey string
+ AwsSessionToken string
+
+ // Deprecated
+ PodIdentityOwner bool
+ // PodIdentityOwner is confusing and will be removed once the legacy aws
+ // podIdentities (aws-eks and aws-kiam) are gone, as UsingPodIdentity
+ // replaces it. For more context:
+ // https://github.com/kedacore/keda/pull/5061/#discussion_r1441016441
+ UsingPodIdentity bool
+
+ TriggerUniqueKey string
+}
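
As a rough illustration, a minimal sketch (hypothetical values) contrasting the two shapes AuthorizationMetadata can take: the new `aws` pod identity path, which sets UsingPodIdentity, and the legacy path, which still relies on PodIdentityOwner with static credentials:

package main

import (
	"fmt"

	awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)

func main() {
	// New path: KEDA assumes the role directly via OIDC federation / STS.
	podIdentityAuth := awsutils.AuthorizationMetadata{
		UsingPodIdentity: true,
		AwsRoleArn:       "arn:aws:iam::123456789012:role/example", // hypothetical
		TriggerUniqueKey: "ns/scaledobject/0",                      // hypothetical
	}

	// Legacy path: explicit keys, kept until aws-eks/aws-kiam are removed.
	staticAuth := awsutils.AuthorizationMetadata{
		PodIdentityOwner:   true,
		AwsAccessKeyID:     "AKIA...", // hypothetical
		AwsSecretAccessKey: "secret",  // hypothetical
		TriggerUniqueKey:   "ns/scaledobject/1",
	}

	fmt.Println(podIdentityAuth.UsingPodIdentity, staticAuth.PodIdentityOwner)
}
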
diff --git a/pkg/scalers/aws/aws_common.go b/pkg/scalers/aws/aws_common.go
new file mode 100644
index 00000000000..b581a4f8d30
--- /dev/null
+++ b/pkg/scalers/aws/aws_common.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2024 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file contains the logic for parsing trigger information into
+a common AuthorizationMetadata. It also contains the logic for
+getting an *aws.Config from a given AuthorizationMetadata, retrieving
+it from the cache when the authentication method supports caching.
+*/
+
+package aws
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+
+ kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+)
+
+// ErrAwsNoAccessKey is returned when awsAccessKeyID is missing.
+var ErrAwsNoAccessKey = errors.New("awsAccessKeyID not found")
+
+type awsConfigMetadata struct {
+ awsRegion string
+ awsAuthorization AuthorizationMetadata
+}
+
+var awsSharedCredentialsCache = newSharedConfigsCache()
+
+// GetAwsConfig returns an *aws.Config for a given AuthorizationMetadata.
+// If the AuthorizationMetadata uses static credentials or `aws` auth,
+// we retrieve the *aws.Config from the shared cache. If not, we generate
+// a new entry on each request.
+func GetAwsConfig(ctx context.Context, awsRegion string, awsAuthorization AuthorizationMetadata) (*aws.Config, error) {
+ metadata := &awsConfigMetadata{
+ awsRegion: awsRegion,
+ awsAuthorization: awsAuthorization,
+ }
+
+ if metadata.awsAuthorization.UsingPodIdentity ||
+ (metadata.awsAuthorization.AwsAccessKeyID != "" && metadata.awsAuthorization.AwsSecretAccessKey != "") {
+ return awsSharedCredentialsCache.GetCredentials(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ }
+
+ // TODO, remove when aws-kiam and aws-eks are removed
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(metadata.awsRegion))
+ cfg, err := config.LoadDefaultConfig(ctx, configOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ if !metadata.awsAuthorization.PodIdentityOwner {
+ return &cfg, nil
+ }
+
+ if metadata.awsAuthorization.AwsRoleArn != "" {
+ stsSvc := sts.NewFromConfig(cfg)
+ stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.AwsRoleArn, func(options *stscreds.AssumeRoleOptions) {})
+ cfg.Credentials = aws.NewCredentialsCache(stsCredentialProvider)
+ }
+ return &cfg, err
+ // END remove when aws-kiam and aws-eks are removed
+}
+
+// GetAwsAuthorization returns an AuthorizationMetadata based on trigger information
+func GetAwsAuthorization(uniqueKey string, podIdentity kedav1alpha1.AuthPodIdentity, triggerMetadata, authParams, resolvedEnv map[string]string) (AuthorizationMetadata, error) {
+ meta := AuthorizationMetadata{
+ TriggerUniqueKey: uniqueKey,
+ }
+
+ if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAws {
+ meta.UsingPodIdentity = true
+ if val, ok := authParams["awsRoleArn"]; ok && val != "" {
+ meta.AwsRoleArn = val
+ }
+ return meta, nil
+ }
+ // TODO, remove all the logic below and just keep the logic for
+ // parsing awsAccessKeyID, awsSecretAccessKey and awsSessionToken
+ // when aws-kiam and aws-eks are removed
+ if triggerMetadata["identityOwner"] == "operator" {
+ meta.PodIdentityOwner = false
+ } else if triggerMetadata["identityOwner"] == "" || triggerMetadata["identityOwner"] == "pod" {
+ meta.PodIdentityOwner = true
+ switch {
+ case authParams["awsRoleArn"] != "":
+ meta.AwsRoleArn = authParams["awsRoleArn"]
+ case (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "":
+ meta.AwsAccessKeyID = authParams["awsAccessKeyID"]
+ if meta.AwsAccessKeyID == "" {
+ meta.AwsAccessKeyID = authParams["awsAccessKeyId"]
+ }
+ meta.AwsSecretAccessKey = authParams["awsSecretAccessKey"]
+ meta.AwsSessionToken = authParams["awsSessionToken"]
+ default:
+ if triggerMetadata["awsAccessKeyID"] != "" {
+ meta.AwsAccessKeyID = triggerMetadata["awsAccessKeyID"]
+ } else if triggerMetadata["awsAccessKeyIDFromEnv"] != "" {
+ meta.AwsAccessKeyID = resolvedEnv[triggerMetadata["awsAccessKeyIDFromEnv"]]
+ }
+
+ if len(meta.AwsAccessKeyID) == 0 {
+ return meta, ErrAwsNoAccessKey
+ }
+
+ if triggerMetadata["awsSecretAccessKeyFromEnv"] != "" {
+ meta.AwsSecretAccessKey = resolvedEnv[triggerMetadata["awsSecretAccessKeyFromEnv"]]
+ }
+
+ if len(meta.AwsSecretAccessKey) == 0 {
+ return meta, fmt.Errorf("awsSecretAccessKey not found")
+ }
+ }
+ }
+
+ return meta, nil
+}
+
+// ClearAwsConfig wraps the removal of the config from the cache
+func ClearAwsConfig(awsAuthorization AuthorizationMetadata) {
+ awsSharedCredentialsCache.RemoveCachedEntry(awsAuthorization)
+}
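
Taken together, the three exported functions define the lifecycle that every AWS scaler in this patch follows: resolve an AuthorizationMetadata at parse time, exchange it for a (possibly cached) *aws.Config when a client is needed, and release the cache usage on Close. A minimal sketch, assuming a hypothetical trigger unique key and role ARN:

package main

import (
	"context"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
	awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)

func main() {
	ctx := context.Background()

	// TriggerUniqueKey identifies this trigger as a usage of the cached config.
	auth, err := awsutils.GetAwsAuthorization(
		"ns/scaledobject/0", // hypothetical unique key
		kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAws},
		map[string]string{}, // triggerMetadata
		map[string]string{"awsRoleArn": "arn:aws:iam::123456789012:role/example"}, // authParams, hypothetical ARN
		map[string]string{}, // resolvedEnv
	)
	if err != nil {
		panic(err)
	}

	// Cached per authentication info; other triggers with the same role
	// receive the same *aws.Config instead of minting new credentials.
	cfg, err := awsutils.GetAwsConfig(ctx, "us-east-1", auth)
	if err != nil {
		panic(err)
	}
	_ = cfg // build an AWS service client from cfg here

	// On scaler Close: deregister this trigger; the cache entry is evicted
	// once no trigger uses it.
	awsutils.ClearAwsConfig(auth)
}
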
diff --git a/pkg/scalers/aws/aws_config_cache.go b/pkg/scalers/aws/aws_config_cache.go
new file mode 100644
index 00000000000..684e45c743b
--- /dev/null
+++ b/pkg/scalers/aws/aws_config_cache.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2024 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file contains all the logic for caching aws.Config across all the (AWS)
+triggers. The first time an aws.Config is requested, it's cached keyed on
+the authentication info (roleArn, Key&Secret, keda itself) and it's returned
+whenever an aws.Config is requested for the same authentication info.
+This is required because, if we didn't cache and share them, each scaler
+would generate and refresh its own token even though all the tokens grant
+the same permissions
+*/
+
+package aws
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/go-logr/logr"
+ "golang.org/x/crypto/sha3"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var (
+ webIdentityTokenFile = os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+)
+
+// cacheEntry stores an *aws.Config and tracks where it is used
+type cacheEntry struct {
+ config *aws.Config
+ usages map[string]bool // Tracks the resources that have requested this config
+}
+
+// sharedConfigCache is a shared cache for storing all *aws.Config
+// across all (AWS) triggers
+type sharedConfigCache struct {
+ sync.Mutex
+ items map[string]cacheEntry
+ logger logr.Logger
+}
+
+func newSharedConfigsCache() sharedConfigCache {
+ return sharedConfigCache{items: map[string]cacheEntry{}, logger: logf.Log.WithName("aws_credentials_cache")}
+}
+
+// getCacheKey returns a unique key based on the given AuthorizationMetadata.
+// As it can contain sensitive data, the key is hashed so secrets are not exposed
+func (a *sharedConfigCache) getCacheKey(awsAuthorization AuthorizationMetadata) string {
+ key := "keda"
+ if awsAuthorization.AwsAccessKeyID != "" {
+ key = fmt.Sprintf("%s-%s-%s", awsAuthorization.AwsAccessKeyID, awsAuthorization.AwsSecretAccessKey, awsAuthorization.AwsSessionToken)
+ } else if awsAuthorization.AwsRoleArn != "" {
+ key = awsAuthorization.AwsRoleArn
+ }
+ // to avoid using sensitive data as the key and to keep a constant key
+ // size, we hash the key with sha3
+ hash := sha3.Sum224([]byte(key))
+ return hex.EncodeToString(hash[:])
+}
+
+// GetCredentials returns an *aws.Config for a given AuthorizationMetadata.
+// The *aws.Config is also cached for subsequent requests with the same
+// AuthorizationMetadata, sharing it between all of them. To track which
+// scalers use an *aws.Config, every scaler that requests one is registered
+// inside the cached item.
+func (a *sharedConfigCache) GetCredentials(ctx context.Context, awsRegion string, awsAuthorization AuthorizationMetadata) (*aws.Config, error) {
+ a.Lock()
+ defer a.Unlock()
+ key := a.getCacheKey(awsAuthorization)
+ if cachedEntry, exists := a.items[key]; exists {
+ cachedEntry.usages[awsAuthorization.TriggerUniqueKey] = true
+ a.items[key] = cachedEntry
+ return cachedEntry.config, nil
+ }
+
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, err := config.LoadDefaultConfig(ctx, configOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ if awsAuthorization.UsingPodIdentity {
+ if awsAuthorization.AwsRoleArn != "" {
+ cfg.Credentials = a.retrievePodIdentityCredentials(ctx, cfg, awsAuthorization.AwsRoleArn)
+ }
+ } else {
+ cfg.Credentials = a.retrieveStaticCredentials(awsAuthorization)
+ }
+
+ newCacheEntry := cacheEntry{
+ config: &cfg,
+ usages: map[string]bool{
+ awsAuthorization.TriggerUniqueKey: true,
+ },
+ }
+ a.items[key] = newCacheEntry
+
+ return &cfg, nil
+}
+
+// RemoveCachedEntry removes the usage of an AuthorizationMetadata from the cached item.
+// If the cached item no longer has any usages (because no trigger is using the aws.Config),
+// it is also removed from the cache.
+func (a *sharedConfigCache) RemoveCachedEntry(awsAuthorization AuthorizationMetadata) {
+ a.Lock()
+ defer a.Unlock()
+ key := a.getCacheKey(awsAuthorization)
+ if cachedEntry, exists := a.items[key]; exists {
+ // Delete the TriggerUniqueKey from usages
+ delete(cachedEntry.usages, awsAuthorization.TriggerUniqueKey)
+
+ // If no more usages, delete the entire entry from the cache
+ if len(cachedEntry.usages) == 0 {
+ delete(a.items, key)
+ } else {
+ a.items[key] = cachedEntry
+ }
+ }
+}
+
+// retrievePodIdentityCredentials returns an *aws.CredentialsCache that assumes the given roleArn.
+// It first tries to assume the role using WebIdentity (OIDC federation); if that method fails,
+// it falls back to assuming the role using KEDA's own role (AssumeRole).
+func (a *sharedConfigCache) retrievePodIdentityCredentials(ctx context.Context, cfg aws.Config, roleArn string) *aws.CredentialsCache {
+ stsSvc := sts.NewFromConfig(cfg)
+
+ if webIdentityTokenFile != "" {
+ webIdentityCredentialProvider := stscreds.NewWebIdentityRoleProvider(stsSvc, roleArn, stscreds.IdentityTokenFile(webIdentityTokenFile), func(options *stscreds.WebIdentityRoleOptions) {
+ options.RoleSessionName = "KEDA"
+ })
+
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+ _, err := webIdentityCredentialProvider.Retrieve(ctx)
+ if err == nil {
+ a.logger.V(1).Info(fmt.Sprintf("using assume web identity role to retrieve token for roleArn %s", roleArn))
+ return aws.NewCredentialsCache(webIdentityCredentialProvider)
+ }
+ a.logger.V(1).Error(err, fmt.Sprintf("error retrieving roleArn %s via WebIdentity", roleArn))
+ }
+
+ // Fallback to Assume Role
+ a.logger.V(1).Info(fmt.Sprintf("using assume role to retrieve token for roleArn %s", roleArn))
+ assumeRoleCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, roleArn, func(options *stscreds.AssumeRoleOptions) {
+ options.RoleSessionName = "KEDA"
+ })
+ return aws.NewCredentialsCache(assumeRoleCredentialProvider)
+}
+
+// retrieveStaticCredentials returns an *aws.CredentialsCache for the given
+// AuthorizationMetadata (using static credentials). This is used for static
+// authentication via AwsAccessKeyID & AwsSecretAccessKey
+func (*sharedConfigCache) retrieveStaticCredentials(awsAuthorization AuthorizationMetadata) *aws.CredentialsCache {
+ staticCredentialsProvider := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(awsAuthorization.AwsAccessKeyID, awsAuthorization.AwsSecretAccessKey, awsAuthorization.AwsSessionToken))
+ return staticCredentialsProvider
+}
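
A standalone sketch of the key derivation used by getCacheKey, reimplemented here only for illustration since the function is unexported: the role ARN, the key/secret/token tuple, or the constant "keda" is hashed with SHA3-224, so two triggers assuming the same role resolve to the same cache entry:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func cacheKeyFor(roleArn string) string {
	key := "keda" // fallback when neither static keys nor a role ARN are set
	if roleArn != "" {
		key = roleArn
	}
	// Hash so that sensitive data never appears as a map key, and keys
	// have a constant size.
	hash := sha3.Sum224([]byte(key))
	return hex.EncodeToString(hash[:])
}

func main() {
	// Two triggers assuming the same (hypothetical) role map to one entry.
	a := cacheKeyFor("arn:aws:iam::123456789012:role/example")
	b := cacheKeyFor("arn:aws:iam::123456789012:role/example")
	fmt.Println(a == b) // true
}
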
diff --git a/pkg/scalers/aws/aws_config_cache_test.go b/pkg/scalers/aws/aws_config_cache_test.go
new file mode 100644
index 00000000000..d94247a6fee
--- /dev/null
+++ b/pkg/scalers/aws/aws_config_cache_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2024 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/go-logr/logr"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetCredentialsReturnNewItemAndStoreItIfNotExist(t *testing.T) {
+ cache := newSharedConfigsCache()
+ cache.logger = logr.Discard()
+ config := awsConfigMetadata{
+ awsRegion: "test-region",
+ awsAuthorization: AuthorizationMetadata{
+ TriggerUniqueKey: "test-key",
+ },
+ }
+ cacheKey := cache.getCacheKey(config.awsAuthorization)
+ _, err := cache.GetCredentials(context.Background(), config.awsRegion, config.awsAuthorization)
+ assert.NoError(t, err)
+ assert.Contains(t, cache.items, cacheKey)
+ assert.Contains(t, cache.items[cacheKey].usages, config.awsAuthorization.TriggerUniqueKey)
+}
+
+func TestGetCredentialsReturnCachedItemIfExist(t *testing.T) {
+ cache := newSharedConfigsCache()
+ cache.logger = logr.Discard()
+ config := awsConfigMetadata{
+ awsRegion: "test1-region",
+ awsAuthorization: AuthorizationMetadata{
+ TriggerUniqueKey: "test1-key",
+ },
+ }
+ cfg := aws.Config{}
+ cfg.AppID = "test1-app"
+ cacheKey := cache.getCacheKey(config.awsAuthorization)
+ cache.items[cacheKey] = cacheEntry{
+ config: &cfg,
+ usages: map[string]bool{
+ "other-usage": true,
+ },
+ }
+ configFromCache, err := cache.GetCredentials(context.Background(), config.awsRegion, config.awsAuthorization)
+ assert.NoError(t, err)
+ assert.Equal(t, &cfg, configFromCache)
+ assert.Contains(t, cache.items[cacheKey].usages, config.awsAuthorization.TriggerUniqueKey)
+}
+
+func TestRemoveCachedEntryRemovesCachedItemIfNotUsages(t *testing.T) {
+ cache := newSharedConfigsCache()
+ cache.logger = logr.Discard()
+ config := awsConfigMetadata{
+ awsRegion: "test2-region",
+ awsAuthorization: AuthorizationMetadata{
+ TriggerUniqueKey: "test2-key",
+ },
+ }
+ cfg := aws.Config{}
+ cfg.AppID = "test2-app"
+ cacheKey := cache.getCacheKey(config.awsAuthorization)
+ cache.items[cacheKey] = cacheEntry{
+ config: &cfg,
+ usages: map[string]bool{
+ config.awsAuthorization.TriggerUniqueKey: true,
+ },
+ }
+ cache.RemoveCachedEntry(config.awsAuthorization)
+ assert.NotContains(t, cache.items, cacheKey)
+}
+
+func TestRemoveCachedEntryNotRemoveCachedItemIfUsages(t *testing.T) {
+ cache := newSharedConfigsCache()
+ cache.logger = logr.Discard()
+ config := awsConfigMetadata{
+ awsRegion: "test3-region",
+ awsAuthorization: AuthorizationMetadata{
+ TriggerUniqueKey: "test3-key",
+ },
+ }
+ cfg := aws.Config{}
+ cfg.AppID = "test3-app"
+ cacheKey := cache.getCacheKey(config.awsAuthorization)
+ cache.items[cacheKey] = cacheEntry{
+ config: &cfg,
+ usages: map[string]bool{
+ config.awsAuthorization.TriggerUniqueKey: true,
+ "other-usage": true,
+ },
+ }
+ cache.RemoveCachedEntry(config.awsAuthorization)
+ assert.Contains(t, cache.items, cacheKey)
+}
diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go
index deea194e4eb..a140557e22f 100644
--- a/pkg/scalers/aws_cloudwatch_scaler.go
+++ b/pkg/scalers/aws_cloudwatch_scaler.go
@@ -13,6 +13,8 @@ import (
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)
const (
@@ -49,9 +51,9 @@ type awsCloudwatchMetadata struct {
awsRegion string
awsEndpoint string
- awsAuthorization awsAuthorizationMetadata
+ awsAuthorization awsutils.AuthorizationMetadata
- scalerIndex int
+ triggerIndex int
}
// NewAwsCloudwatchScaler creates a new awsCloudwatchScaler
@@ -111,7 +113,7 @@ func getFloatMetadataValue(metadata map[string]string, key string, required bool
}
func createCloudwatchClient(ctx context.Context, metadata *awsCloudwatchMetadata) (*cloudwatch.Client, error) {
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, err
@@ -230,13 +232,13 @@ func parseAwsCloudwatchMetadata(config *ScalerConfig) (*awsCloudwatchMetadata, e
meta.awsEndpoint = val
}
- awsAuthorization, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ awsAuthorization, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return nil, err
}
meta.awsAuthorization = awsAuthorization
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -303,7 +305,7 @@ func (s *awsCloudwatchScaler) GetMetricsAndActivity(ctx context.Context, metricN
func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "aws-cloudwatch"),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "aws-cloudwatch"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetMetricValue),
}
@@ -312,6 +314,7 @@ func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2.Metr
}
func (s *awsCloudwatchScaler) Close(context.Context) error {
+ awsutils.ClearAwsConfig(s.metadata.awsAuthorization)
return nil
}
diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go
index 5abdfd393fc..1f57666e81e 100644
--- a/pkg/scalers/aws_cloudwatch_scaler_test.go
+++ b/pkg/scalers/aws_cloudwatch_scaler_test.go
@@ -10,6 +10,8 @@ import (
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
+
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)
const (
@@ -40,7 +42,7 @@ type parseAWSCloudwatchMetadataTestData struct {
type awsCloudwatchMetricIdentifier struct {
metadataTestData *parseAWSCloudwatchMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -375,8 +377,8 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{
metricStatPeriod: 60,
metricEndTimeOffset: 60,
awsRegion: "us-west-2",
- awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false},
- scalerIndex: 0,
+ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false},
+ triggerIndex: 0,
},
{
namespace: "Custom",
@@ -391,8 +393,8 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{
metricStatPeriod: 60,
metricEndTimeOffset: 60,
awsRegion: "us-west-2",
- awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false},
- scalerIndex: 0,
+ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false},
+ triggerIndex: 0,
},
{
namespace: "Custom",
@@ -407,8 +409,8 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{
metricStatPeriod: 60,
metricEndTimeOffset: 60,
awsRegion: "us-west-2",
- awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false},
- scalerIndex: 0,
+ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false},
+ triggerIndex: 0,
},
{
namespace: "Custom",
@@ -423,8 +425,8 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{
metricStatPeriod: 60,
metricEndTimeOffset: 60,
awsRegion: "us-west-2",
- awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false},
- scalerIndex: 0,
+ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false},
+ triggerIndex: 0,
},
{
namespace: "Custom",
@@ -438,8 +440,8 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{
metricStatPeriod: 60,
metricEndTimeOffset: 60,
awsRegion: "us-west-2",
- awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false},
- scalerIndex: 0,
+ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false},
+ triggerIndex: 0,
},
}
@@ -482,7 +484,7 @@ func TestCloudwatchParseMetadata(t *testing.T) {
func TestAWSCloudwatchGetMetricSpecForScaling(t *testing.T) {
for _, testData := range awsCloudwatchMetricIdentifiers {
ctx := context.Background()
- meta, err := parseAwsCloudwatchMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSCloudwatchResolvedEnv, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+ meta, err := parseAwsCloudwatchMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSCloudwatchResolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/aws_common.go b/pkg/scalers/aws_common.go
deleted file mode 100644
index 9cad15ce3d3..00000000000
--- a/pkg/scalers/aws_common.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package scalers
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/config"
- "github.com/aws/aws-sdk-go-v2/credentials"
- "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/service/sts"
-)
-
-// ErrAwsNoAccessKey is returned when awsAccessKeyID is missing.
-var ErrAwsNoAccessKey = errors.New("awsAccessKeyID not found")
-
-type awsAuthorizationMetadata struct {
- awsRoleArn string
-
- awsAccessKeyID string
- awsSecretAccessKey string
- awsSessionToken string
-
- podIdentityOwner bool
-}
-
-type awsConfigMetadata struct {
- awsRegion string
- awsAuthorization awsAuthorizationMetadata
-}
-
-func getAwsConfig(ctx context.Context, awsRegion string, awsAuthorization awsAuthorizationMetadata) (*aws.Config, error) {
- metadata := &awsConfigMetadata{
- awsRegion: awsRegion,
- awsAuthorization: awsAuthorization,
- }
-
- configOptions := make([]func(*config.LoadOptions) error, 0)
- configOptions = append(configOptions, config.WithRegion(metadata.awsRegion))
- cfg, err := config.LoadDefaultConfig(ctx, configOptions...)
- if err != nil {
- return nil, err
- }
- if !metadata.awsAuthorization.podIdentityOwner {
- return &cfg, nil
- }
- if metadata.awsAuthorization.awsAccessKeyID != "" && metadata.awsAuthorization.awsSecretAccessKey != "" {
- staticCredentialsProvider := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(metadata.awsAuthorization.awsAccessKeyID, metadata.awsAuthorization.awsSecretAccessKey, metadata.awsAuthorization.awsSessionToken))
- cfg.Credentials = staticCredentialsProvider
- }
-
- if metadata.awsAuthorization.awsRoleArn != "" {
- stsSvc := sts.NewFromConfig(cfg)
- stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.awsRoleArn, func(options *stscreds.AssumeRoleOptions) {})
- cfg.Credentials = aws.NewCredentialsCache(stsCredentialProvider)
- }
-
- return &cfg, err
-}
-
-func getAwsAuthorization(authParams, metadata, resolvedEnv map[string]string) (awsAuthorizationMetadata, error) {
- meta := awsAuthorizationMetadata{}
-
- if metadata["identityOwner"] == "operator" {
- meta.podIdentityOwner = false
- } else if metadata["identityOwner"] == "" || metadata["identityOwner"] == "pod" {
- meta.podIdentityOwner = true
- switch {
- case authParams["awsRoleArn"] != "":
- meta.awsRoleArn = authParams["awsRoleArn"]
- case (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "":
- meta.awsAccessKeyID = authParams["awsAccessKeyID"]
- if meta.awsAccessKeyID == "" {
- meta.awsAccessKeyID = authParams["awsAccessKeyId"]
- }
- meta.awsSecretAccessKey = authParams["awsSecretAccessKey"]
- meta.awsSessionToken = authParams["awsSessionToken"]
- default:
- if metadata["awsAccessKeyID"] != "" {
- meta.awsAccessKeyID = metadata["awsAccessKeyID"]
- } else if metadata["awsAccessKeyIDFromEnv"] != "" {
- meta.awsAccessKeyID = resolvedEnv[metadata["awsAccessKeyIDFromEnv"]]
- }
-
- if len(meta.awsAccessKeyID) == 0 {
- return meta, ErrAwsNoAccessKey
- }
-
- if metadata["awsSecretAccessKeyFromEnv"] != "" {
- meta.awsSecretAccessKey = resolvedEnv[metadata["awsSecretAccessKeyFromEnv"]]
- }
-
- if len(meta.awsSecretAccessKey) == 0 {
- return meta, fmt.Errorf("awsSecretAccessKey not found")
- }
- }
- }
-
- return meta, nil
-}
diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go
index 05b173fd1ab..d5afb95eab6 100644
--- a/pkg/scalers/aws_dynamodb_scaler.go
+++ b/pkg/scalers/aws_dynamodb_scaler.go
@@ -15,6 +15,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
@@ -35,8 +36,8 @@ type awsDynamoDBMetadata struct {
indexName string
targetValue int64
activationTargetValue int64
- awsAuthorization awsAuthorizationMetadata
- scalerIndex int
+ awsAuthorization awsutils.AuthorizationMetadata
+ triggerIndex int
metricName string
}
@@ -170,22 +171,22 @@ func parseAwsDynamoDBMetadata(config *ScalerConfig) (*awsDynamoDBMetadata, error
meta.activationTargetValue = 0
}
- auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return nil, err
}
meta.awsAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex,
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex,
kedautil.NormalizeString(fmt.Sprintf("aws-dynamodb-%s", meta.tableName)))
return &meta, nil
}
func createDynamoDBClient(ctx context.Context, metadata *awsDynamoDBMetadata) (*dynamodb.Client, error) {
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, err
}
@@ -224,6 +225,7 @@ func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.Metric
}
func (s *awsDynamoDBScaler) Close(context.Context) error {
+ awsutils.ClearAwsConfig(s.metadata.awsAuthorization)
return nil
}
diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go
index 3ed599f5260..1ebbdd2b767 100644
--- a/pkg/scalers/aws_dynamodb_scaler_test.go
+++ b/pkg/scalers/aws_dynamodb_scaler_test.go
@@ -11,6 +11,8 @@ import (
"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
+
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)
const (
@@ -167,7 +169,7 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{
"targetValue": "3",
},
authParams: map[string]string{},
- expectedError: ErrAwsNoAccessKey,
+ expectedError: awsutils.ErrAwsNoAccessKey,
},
{
name: "authentication provided",
@@ -188,12 +190,12 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{
expressionAttributeNames: map[string]string{"#yr": year},
expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr},
targetValue: 3,
- scalerIndex: 1,
+ triggerIndex: 1,
metricName: "s1-aws-dynamodb-test",
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: "none",
- awsSecretAccessKey: "none",
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: "none",
+ AwsSecretAccessKey: "none",
+ PodIdentityOwner: true,
},
},
},
@@ -218,12 +220,12 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{
expressionAttributeNames: map[string]string{"#yr": year},
expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr},
targetValue: 3,
- scalerIndex: 1,
+ triggerIndex: 1,
metricName: "s1-aws-dynamodb-test",
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: "none",
- awsSecretAccessKey: "none",
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: "none",
+ AwsSecretAccessKey: "none",
+ PodIdentityOwner: true,
},
},
},
@@ -248,12 +250,12 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{
expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr},
activationTargetValue: 1,
targetValue: 3,
- scalerIndex: 1,
+ triggerIndex: 1,
metricName: "s1-aws-dynamodb-test",
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: "none",
- awsSecretAccessKey: "none",
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: "none",
+ AwsSecretAccessKey: "none",
+ PodIdentityOwner: true,
},
},
},
@@ -278,12 +280,12 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{
expressionAttributeNames: map[string]string{"#yr": year},
expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr},
targetValue: 3,
- scalerIndex: 1,
+ triggerIndex: 1,
metricName: "s1-aws-dynamodb-test",
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: "none",
- awsSecretAccessKey: "none",
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: "none",
+ AwsSecretAccessKey: "none",
+ PodIdentityOwner: true,
},
},
},
@@ -296,7 +298,7 @@ func TestParseDynamoMetadata(t *testing.T) {
TriggerMetadata: tc.metadata,
AuthParams: tc.authParams,
ResolvedEnv: tc.resolvedEnv,
- ScalerIndex: 1,
+ TriggerIndex: 1,
})
if tc.expectedError != nil {
assert.ErrorContains(t, err, tc.expectedError.Error())
diff --git a/pkg/scalers/aws_dynamodb_streams_scaler.go b/pkg/scalers/aws_dynamodb_streams_scaler.go
index 40c5b32a641..bd06a44d4c8 100644
--- a/pkg/scalers/aws_dynamodb_streams_scaler.go
+++ b/pkg/scalers/aws_dynamodb_streams_scaler.go
@@ -12,6 +12,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
@@ -34,8 +35,8 @@ type awsDynamoDBStreamsMetadata struct {
tableName string
awsRegion string
awsEndpoint string
- awsAuthorization awsAuthorizationMetadata
- scalerIndex int
+ awsAuthorization awsutils.AuthorizationMetadata
+ triggerIndex int
}
// NewAwsDynamoDBStreamsScaler creates a new awsDynamoDBStreamsScaler
@@ -111,19 +112,19 @@ func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig, logger logr.Logger) (
}
}
- auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return nil, err
}
meta.awsAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
func createClientsForDynamoDBStreamsScaler(ctx context.Context, metadata *awsDynamoDBStreamsMetadata) (*dynamodb.Client, *dynamodbstreams.Client, error) {
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, nil, err
}
@@ -167,13 +168,14 @@ func getDynamoDBStreamsArn(ctx context.Context, db dynamodb.DescribeTableAPIClie
}
func (s *awsDynamoDBStreamsScaler) Close(_ context.Context) error {
+ awsutils.ClearAwsConfig(s.metadata.awsAuthorization)
return nil
}
func (s *awsDynamoDBStreamsScaler) GetMetricSpecForScaling(_ context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-dynamodb-streams-%s", s.metadata.tableName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-dynamodb-streams-%s", s.metadata.tableName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetShardCount),
}
diff --git a/pkg/scalers/aws_dynamodb_streams_scaler_test.go b/pkg/scalers/aws_dynamodb_streams_scaler_test.go
index 501cbf28a21..e007dec019c 100644
--- a/pkg/scalers/aws_dynamodb_streams_scaler_test.go
+++ b/pkg/scalers/aws_dynamodb_streams_scaler_test.go
@@ -15,6 +15,8 @@ import (
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
"k8s.io/metrics/pkg/apis/external_metrics"
+
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)
const (
@@ -47,17 +49,17 @@ func generateTestDynamoDBStreamShards(shardNum int64) []types.Shard {
}
type parseAwsDynamoDBStreamsMetadataTestData struct {
- metadata map[string]string
- expected *awsDynamoDBStreamsMetadata
- authParams map[string]string
- isError bool
- comment string
- scalerIndex int
+ metadata map[string]string
+ expected *awsDynamoDBStreamsMetadata
+ authParams map[string]string
+ isError bool
+ comment string
+ triggerIndex int
}
type awsDynamoDBStreamsMetricIdentifier struct {
metadataTestData *parseAwsDynamoDBStreamsMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -136,16 +138,16 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
activationTargetShardCount: 1,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
- awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
+ AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 0,
+ triggerIndex: 0,
},
- isError: false,
- comment: "properly formed dynamodb table name and region",
- scalerIndex: 0,
+ isError: false,
+ comment: "properly formed dynamodb table name and region",
+ triggerIndex: 0,
},
{
metadata: map[string]string{
@@ -161,38 +163,38 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
awsEndpoint: testAWSDynamoDBStreamsEndpoint,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
- awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
+ AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 0,
+ triggerIndex: 0,
},
- isError: false,
- comment: "properly formed dynamodb table name and region",
- scalerIndex: 0,
+ isError: false,
+ comment: "properly formed dynamodb table name and region",
+ triggerIndex: 0,
},
{
metadata: map[string]string{
"tableName": "",
"shardCount": "2",
"awsRegion": testAWSDynamoDBStreamsRegion},
- authParams: testAWSKinesisAuthentication,
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "missing dynamodb table name",
- scalerIndex: 1,
+ authParams: testAWSKinesisAuthentication,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "missing dynamodb table name",
+ triggerIndex: 1,
},
{
metadata: map[string]string{
"tableName": testAWSDynamoDBSmallTable,
"shardCount": "2",
"awsRegion": ""},
- authParams: testAWSKinesisAuthentication,
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "properly formed dynamodb table name, empty region",
- scalerIndex: 2,
+ authParams: testAWSKinesisAuthentication,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "properly formed dynamodb table name, empty region",
+ triggerIndex: 2,
},
{
metadata: map[string]string{
@@ -205,16 +207,16 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
activationTargetShardCount: defaultActivationTargetDBStreamsShardCount,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
- awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
+ AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 3,
+ triggerIndex: 3,
},
- isError: false,
- comment: "properly formed table name and region, empty shard count",
- scalerIndex: 3,
+ isError: false,
+ comment: "properly formed table name and region, empty shard count",
+ triggerIndex: 3,
},
{
metadata: map[string]string{
@@ -226,16 +228,16 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
targetShardCount: defaultTargetDBStreamsShardCount,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
- awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
+ AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 4,
+ triggerIndex: 4,
},
- isError: false,
- comment: "properly formed table name and region, wrong shard count",
- scalerIndex: 4,
+ isError: false,
+ comment: "properly formed table name and region, wrong shard count",
+ triggerIndex: 4,
},
{
metadata: map[string]string{
@@ -246,10 +248,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
"awsAccessKeyID": "",
"awsSecretAccessKey": testAWSDynamoDBStreamsSecretAccessKey,
},
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "with AWS static credentials from TriggerAuthentication, missing Access Key Id",
- scalerIndex: 5,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "with AWS static credentials from TriggerAuthentication, missing Access Key Id",
+ triggerIndex: 5,
},
{metadata: map[string]string{
"tableName": testAWSDynamoDBSmallTable,
@@ -259,10 +261,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
"awsAccessKeyID": testAWSDynamoDBStreamsAccessKeyID,
"awsSecretAccessKey": "",
},
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "with AWS static credentials from TriggerAuthentication, missing Secret Access Key",
- scalerIndex: 6,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "with AWS static credentials from TriggerAuthentication, missing Secret Access Key",
+ triggerIndex: 6,
},
{
metadata: map[string]string{
@@ -278,17 +280,17 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
targetShardCount: 2,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
- awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
- awsSessionToken: testAWSDynamoDBStreamsSessionToken,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID,
+ AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey,
+ AwsSessionToken: testAWSDynamoDBStreamsSessionToken,
+ PodIdentityOwner: true,
},
- scalerIndex: 5,
+ triggerIndex: 5,
},
- isError: false,
- comment: "with AWS temporary credentials from TriggerAuthentication",
- scalerIndex: 5,
+ isError: false,
+ comment: "with AWS temporary credentials from TriggerAuthentication",
+ triggerIndex: 5,
},
{
metadata: map[string]string{
@@ -300,10 +302,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
"awsSecretAccessKey": testAWSDynamoDBStreamsSecretAccessKey,
"awsSessionToken": testAWSDynamoDBStreamsSessionToken,
},
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "with AWS temporary credentials from TriggerAuthentication, missing Access Key Id",
- scalerIndex: 5,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "with AWS temporary credentials from TriggerAuthentication, missing Access Key Id",
+ triggerIndex: 5,
},
{metadata: map[string]string{
"tableName": testAWSDynamoDBSmallTable,
@@ -314,10 +316,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
"awsSecretAccessKey": "",
"awsSessionToken": testAWSDynamoDBStreamsSessionToken,
},
- expected: &awsDynamoDBStreamsMetadata{},
- isError: true,
- comment: "with AWS temporary credentials from TriggerAuthentication, missing Secret Access Key",
- scalerIndex: 6,
+ expected: &awsDynamoDBStreamsMetadata{},
+ isError: true,
+ comment: "with AWS temporary credentials from TriggerAuthentication, missing Secret Access Key",
+ triggerIndex: 6,
},
{metadata: map[string]string{
"tableName": testAWSDynamoDBSmallTable,
@@ -330,15 +332,15 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
targetShardCount: 2,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsRoleArn: testAWSDynamoDBStreamsRoleArn,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsRoleArn: testAWSDynamoDBStreamsRoleArn,
+ PodIdentityOwner: true,
},
- scalerIndex: 7,
+ triggerIndex: 7,
},
- isError: false,
- comment: "with AWS Role from TriggerAuthentication",
- scalerIndex: 7,
+ isError: false,
+ comment: "with AWS Role from TriggerAuthentication",
+ triggerIndex: 7,
},
{metadata: map[string]string{
"tableName": testAWSDynamoDBSmallTable,
@@ -350,14 +352,14 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{
targetShardCount: 2,
tableName: testAWSDynamoDBSmallTable,
awsRegion: testAWSDynamoDBStreamsRegion,
- awsAuthorization: awsAuthorizationMetadata{
- podIdentityOwner: false,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ PodIdentityOwner: false,
},
- scalerIndex: 8,
+ triggerIndex: 8,
},
- isError: false,
- comment: "with AWS Role assigned on KEDA operator itself",
- scalerIndex: 8,
+ isError: false,
+ comment: "with AWS Role assigned on KEDA operator itself",
+ triggerIndex: 8,
},
}
@@ -375,7 +377,7 @@ var awsDynamoDBStreamsGetMetricTestData = []*awsDynamoDBStreamsMetadata{
func TestParseAwsDynamoDBStreamsMetadata(t *testing.T) {
for _, testData := range testAwsDynamoDBStreamMetadata {
- result, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ result, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil && !testData.isError {
t.Errorf("Expected success because %s got error, %s", testData.comment, err)
}
@@ -392,7 +394,7 @@ func TestParseAwsDynamoDBStreamsMetadata(t *testing.T) {
func TestAwsDynamoDBStreamsGetMetricSpecForScaling(t *testing.T) {
for _, testData := range awsDynamoDBStreamMetricIdentifiers {
ctx := context.Background()
- meta, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
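Note for reviewers: every test file in this change migrates the same way, replacing the ScalerIndex field on ScalerConfig with TriggerIndex. A minimal sketch of the post-rename wiring, using placeholder values rather than anything from the fixtures above:

package scalers

import "github.com/go-logr/logr"

// exampleTriggerConfig shows the renamed field in use. All values are dummies;
// the TriggerIndex field name is the only point of the example.
func exampleTriggerConfig() {
	cfg := &ScalerConfig{
		TriggerMetadata: map[string]string{"tableName": "myTable", "awsRegion": "eu-west-1"},
		ResolvedEnv:     map[string]string{},
		AuthParams:      map[string]string{},
		TriggerIndex:    5,
	}
	// Result and error are discarded here; this only illustrates construction.
	_, _ = parseAwsDynamoDBStreamsMetadata(cfg, logr.Discard())
}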
diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go
index 95ff02643a1..09ed926784e 100644
--- a/pkg/scalers/aws_kinesis_stream_scaler.go
+++ b/pkg/scalers/aws_kinesis_stream_scaler.go
@@ -11,6 +11,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
@@ -44,8 +45,8 @@ type awsKinesisStreamMetadata struct {
streamName string
awsRegion string
awsEndpoint string
- awsAuthorization awsAuthorizationMetadata
- scalerIndex int
+ awsAuthorization awsutils.AuthorizationMetadata
+ triggerIndex int
}
// NewAwsKinesisStreamScaler creates a new awsKinesisStreamScaler
@@ -116,20 +117,20 @@ func parseAwsKinesisStreamMetadata(config *ScalerConfig, logger logr.Logger) (*a
meta.awsEndpoint = val
}
- auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return nil, err
}
meta.awsAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
func createKinesisClient(ctx context.Context, metadata *awsKinesisStreamMetadata) (*kinesis.Client, error) {
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, err
}
@@ -141,13 +142,14 @@ func createKinesisClient(ctx context.Context, metadata *awsKinesisStreamMetadata
}
func (s *awsKinesisStreamScaler) Close(context.Context) error {
+ awsutils.ClearAwsConfig(s.metadata.awsAuthorization)
return nil
}
func (s *awsKinesisStreamScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-kinesis-%s", s.metadata.streamName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-kinesis-%s", s.metadata.streamName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetShardCount),
}
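The Kinesis hunks above show the full lifecycle shared by the AWS scalers in this change: authorization resolved at parse time, an SDK config built from it, and the cached entry released on Close. A condensed sketch of that flow, assuming the pkg/scalers/aws helpers behave as these call sites suggest (the package itself is not part of this excerpt; the region string is a placeholder):

package scalers

import (
	"context"

	awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)

// awsAuthLifecycle sketches the parse -> client -> Close sequence used by the
// AWS scalers after this change.
func awsAuthLifecycle(ctx context.Context, config *ScalerConfig) error {
	// Parse time: TriggerUniqueKey and PodIdentity are new inputs compared
	// with the old package-private getAwsAuthorization.
	auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
	if err != nil {
		return err
	}
	// Client creation: the returned config feeds kinesis.NewFromConfig etc.
	cfg, err := awsutils.GetAwsConfig(ctx, "eu-west-1", auth)
	if err != nil {
		return err
	}
	_ = cfg
	// Close: release whatever was cached for this trigger's authorization.
	awsutils.ClearAwsConfig(auth)
	return nil
}

Pairing GetAwsConfig with ClearAwsConfig in Close is presumably what keeps the new credentials cache bounded: cached role credentials should not outlive the trigger that created them.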
diff --git a/pkg/scalers/aws_kinesis_stream_scaler_test.go b/pkg/scalers/aws_kinesis_stream_scaler_test.go
index eef209df679..7bdad47886c 100644
--- a/pkg/scalers/aws_kinesis_stream_scaler_test.go
+++ b/pkg/scalers/aws_kinesis_stream_scaler_test.go
@@ -11,6 +11,8 @@ import (
"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
+
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
)
const (
@@ -30,17 +32,17 @@ var testAWSKinesisAuthentication = map[string]string{
}
type parseAWSKinesisMetadataTestData struct {
- metadata map[string]string
- expected *awsKinesisStreamMetadata
- authParams map[string]string
- isError bool
- comment string
- scalerIndex int
+ metadata map[string]string
+ expected *awsKinesisStreamMetadata
+ authParams map[string]string
+ isError bool
+ comment string
+ triggerIndex int
}
type awsKinesisMetricIdentifier struct {
metadataTestData *parseAWSKinesisMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -78,16 +80,16 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
activationTargetShardCount: 1,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSKinesisAccessKeyID,
- awsSecretAccessKey: testAWSKinesisSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSKinesisAccessKeyID,
+ AwsSecretAccessKey: testAWSKinesisSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 0,
+ triggerIndex: 0,
},
- isError: false,
- comment: "properly formed stream name and region",
- scalerIndex: 0,
+ isError: false,
+ comment: "properly formed stream name and region",
+ triggerIndex: 0,
},
{
metadata: map[string]string{
@@ -103,38 +105,38 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
awsEndpoint: testAWSEndpoint,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSKinesisAccessKeyID,
- awsSecretAccessKey: testAWSKinesisSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSKinesisAccessKeyID,
+ AwsSecretAccessKey: testAWSKinesisSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 0,
+ triggerIndex: 0,
},
- isError: false,
- comment: "properly formed stream name and region with custom endpoint",
- scalerIndex: 0,
+ isError: false,
+ comment: "properly formed stream name and region with custom endpoint",
+ triggerIndex: 0,
},
{
metadata: map[string]string{
"streamName": "",
"shardCount": "2",
"awsRegion": testAWSRegion},
- authParams: testAWSKinesisAuthentication,
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "missing stream name",
- scalerIndex: 1,
+ authParams: testAWSKinesisAuthentication,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "missing stream name",
+ triggerIndex: 1,
},
{
metadata: map[string]string{
"streamName": testAWSKinesisStreamName,
"shardCount": "2",
"awsRegion": ""},
- authParams: testAWSKinesisAuthentication,
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "properly formed stream name, empty region",
- scalerIndex: 2,
+ authParams: testAWSKinesisAuthentication,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "properly formed stream name, empty region",
+ triggerIndex: 2,
},
{
metadata: map[string]string{
@@ -148,16 +150,16 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
activationTargetShardCount: activationTargetShardCountDefault,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSKinesisAccessKeyID,
- awsSecretAccessKey: testAWSKinesisSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSKinesisAccessKeyID,
+ AwsSecretAccessKey: testAWSKinesisSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 3,
+ triggerIndex: 3,
},
- isError: false,
- comment: "properly formed stream name and region, empty shard count",
- scalerIndex: 3,
+ isError: false,
+ comment: "properly formed stream name and region, empty shard count",
+ triggerIndex: 3,
},
{
metadata: map[string]string{
@@ -169,16 +171,16 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
targetShardCount: 2,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSKinesisAccessKeyID,
- awsSecretAccessKey: testAWSKinesisSecretAccessKey,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSKinesisAccessKeyID,
+ AwsSecretAccessKey: testAWSKinesisSecretAccessKey,
+ PodIdentityOwner: true,
},
- scalerIndex: 4,
+ triggerIndex: 4,
},
- isError: false,
- comment: "properly formed stream name and region, wrong shard count",
- scalerIndex: 4,
+ isError: false,
+ comment: "properly formed stream name and region, wrong shard count",
+ triggerIndex: 4,
},
{
metadata: map[string]string{
@@ -189,10 +191,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
"awsAccessKeyID": "",
"awsSecretAccessKey": testAWSKinesisSecretAccessKey,
},
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "with AWS static credentials from TriggerAuthentication, missing Access Key Id",
- scalerIndex: 5,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "with AWS static credentials from TriggerAuthentication, missing Access Key Id",
+ triggerIndex: 5,
},
{metadata: map[string]string{
"streamName": testAWSKinesisStreamName,
@@ -202,10 +204,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
"awsAccessKeyID": testAWSKinesisAccessKeyID,
"awsSecretAccessKey": "",
},
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "with AWS static credentials from TriggerAuthentication, missing Secret Access Key",
- scalerIndex: 6,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "with AWS static credentials from TriggerAuthentication, missing Secret Access Key",
+ triggerIndex: 6,
},
{
metadata: map[string]string{
@@ -221,17 +223,17 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
targetShardCount: 2,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsAccessKeyID: testAWSKinesisAccessKeyID,
- awsSecretAccessKey: testAWSKinesisSecretAccessKey,
- awsSessionToken: testAWSKinesisSessionToken,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsAccessKeyID: testAWSKinesisAccessKeyID,
+ AwsSecretAccessKey: testAWSKinesisSecretAccessKey,
+ AwsSessionToken: testAWSKinesisSessionToken,
+ PodIdentityOwner: true,
},
- scalerIndex: 5,
+ triggerIndex: 5,
},
- isError: false,
- comment: "with AWS temporary credentials from TriggerAuthentication",
- scalerIndex: 5,
+ isError: false,
+ comment: "with AWS temporary credentials from TriggerAuthentication",
+ triggerIndex: 5,
},
{
metadata: map[string]string{
@@ -243,10 +245,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
"awsSecretAccessKey": testAWSKinesisSecretAccessKey,
"awsSessionToken": testAWSKinesisSessionToken,
},
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "with AWS temporary credentials from TriggerAuthentication, missing Access Key Id",
- scalerIndex: 5,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "with AWS temporary credentials from TriggerAuthentication, missing Access Key Id",
+ triggerIndex: 5,
},
{metadata: map[string]string{
"streamName": testAWSKinesisStreamName,
@@ -257,10 +259,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
"awsSecretAccessKey": "",
"awsSessionToken": testAWSKinesisSessionToken,
},
- expected: &awsKinesisStreamMetadata{},
- isError: true,
- comment: "with AWS temporary credentials from TriggerAuthentication, missing Secret Access Key",
- scalerIndex: 6,
+ expected: &awsKinesisStreamMetadata{},
+ isError: true,
+ comment: "with AWS temporary credentials from TriggerAuthentication, missing Secret Access Key",
+ triggerIndex: 6,
},
{metadata: map[string]string{
"streamName": testAWSKinesisStreamName,
@@ -273,15 +275,15 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
targetShardCount: 2,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- awsRoleArn: testAWSKinesisRoleArn,
- podIdentityOwner: true,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ AwsRoleArn: testAWSKinesisRoleArn,
+ PodIdentityOwner: true,
},
- scalerIndex: 7,
+ triggerIndex: 7,
},
- isError: false,
- comment: "with AWS Role from TriggerAuthentication",
- scalerIndex: 7,
+ isError: false,
+ comment: "with AWS Role from TriggerAuthentication",
+ triggerIndex: 7,
},
{metadata: map[string]string{
"streamName": testAWSKinesisStreamName,
@@ -293,14 +295,14 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{
targetShardCount: 2,
streamName: testAWSKinesisStreamName,
awsRegion: testAWSRegion,
- awsAuthorization: awsAuthorizationMetadata{
- podIdentityOwner: false,
+ awsAuthorization: awsutils.AuthorizationMetadata{
+ PodIdentityOwner: false,
},
- scalerIndex: 8,
+ triggerIndex: 8,
},
- isError: false,
- comment: "with AWS Role assigned on KEDA operator itself",
- scalerIndex: 8,
+ isError: false,
+ comment: "with AWS Role assigned on KEDA operator itself",
+ triggerIndex: 8,
},
}
@@ -316,7 +318,7 @@ var awsKinesisGetMetricTestData = []*awsKinesisStreamMetadata{
func TestKinesisParseMetadata(t *testing.T) {
for _, testData := range testAWSKinesisMetadata {
- result, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ result, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil && !testData.isError {
t.Errorf("Expected success because %s got error, %s", testData.comment, err)
}
@@ -333,7 +335,7 @@ func TestKinesisParseMetadata(t *testing.T) {
func TestAWSKinesisGetMetricSpecForScaling(t *testing.T) {
for _, testData := range awsKinesisMetricIdentifiers {
ctx := context.Background()
- meta, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
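The test fixtures above also pin down the exported surface of the new authorization type. Inferred from usage in this diff only; the real definition in pkg/scalers/aws may carry additional fields:

package aws

// AuthorizationMetadata as exercised by the scaler tests in this change.
// Sketch reconstructed from the fixtures, not the actual source file.
type AuthorizationMetadata struct {
	AwsAccessKeyID     string
	AwsSecretAccessKey string
	AwsSessionToken    string
	AwsRoleArn         string
	PodIdentityOwner   bool
}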
diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go
index 40e39217d10..f90cdb7f353 100644
--- a/pkg/scalers/aws_sqs_queue_scaler.go
+++ b/pkg/scalers/aws_sqs_queue_scaler.go
@@ -14,6 +14,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
+ awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
@@ -38,8 +39,8 @@ type awsSqsQueueMetadata struct {
queueName string
awsRegion string
awsEndpoint string
- awsAuthorization awsAuthorizationMetadata
- scalerIndex int
+ awsAuthorization awsutils.AuthorizationMetadata
+ triggerIndex int
scaleOnInFlight bool
scaleOnDelayed bool
awsSqsQueueMetricNames []types.QueueAttributeName
@@ -175,20 +176,20 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs
meta.awsEndpoint = val
}
- auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv)
+ auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv)
if err != nil {
return nil, err
}
meta.awsAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
func createSqsClient(ctx context.Context, metadata *awsSqsQueueMetadata) (*sqs.Client, error) {
- cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
+ cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization)
if err != nil {
return nil, err
}
@@ -200,13 +201,14 @@ func createSqsClient(ctx context.Context, metadata *awsSqsQueueMetadata) (*sqs.C
}
func (s *awsSqsQueueScaler) Close(context.Context) error {
+ awsutils.ClearAwsConfig(s.metadata.awsAuthorization)
return nil
}
func (s *awsSqsQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-sqs-%s", s.metadata.queueName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-sqs-%s", s.metadata.queueName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetQueueLength),
}
diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go
index 7095a7b7ee1..cf3a51766a5 100644
--- a/pkg/scalers/aws_sqs_queue_scaler_test.go
+++ b/pkg/scalers/aws_sqs_queue_scaler_test.go
@@ -51,7 +51,7 @@ type parseAWSSQSMetadataTestData struct {
type awsSQSMetricIdentifier struct {
metadataTestData *parseAWSSQSMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -402,7 +402,7 @@ func TestSQSParseMetadata(t *testing.T) {
func TestAWSSQSGetMetricSpecForScaling(t *testing.T) {
for _, testData := range awsSQSMetricIdentifiers {
ctx := context.Background()
- meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -418,7 +418,7 @@ func TestAWSSQSGetMetricSpecForScaling(t *testing.T) {
func TestAWSSQSScalerGetMetrics(t *testing.T) {
for index, testData := range awsSQSGetMetricTestData {
- meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams, ScalerIndex: index}, logr.Discard())
+ meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams, TriggerIndex: index}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure/azure_blob.go b/pkg/scalers/azure/azure_blob.go
index 3314df79d57..2e88eadd4db 100644
--- a/pkg/scalers/azure/azure_blob.go
+++ b/pkg/scalers/azure/azure_blob.go
@@ -35,7 +35,7 @@ type BlobMetadata struct {
Connection string
AccountName string
EndpointSuffix string
- ScalerIndex int
+ TriggerIndex int
GlobPattern *glob.Glob
}
diff --git a/pkg/scalers/azure_app_insights_scaler.go b/pkg/scalers/azure_app_insights_scaler.go
index b4a164dd58f..59d5bb0ccb0 100644
--- a/pkg/scalers/azure_app_insights_scaler.go
+++ b/pkg/scalers/azure_app_insights_scaler.go
@@ -35,7 +35,7 @@ type azureAppInsightsMetadata struct {
azureAppInsightsInfo azure.AppInsightsInfo
targetValue float64
activationTargetValue float64
- scalerIndex int
+ triggerIndex int
// sometimes an error can be considered acceptable
// default value is true/t: ignore null values returned by the query
// change to false/f if null results should be treated as errors
@@ -184,7 +184,7 @@ func parseAzureAppInsightsMetadata(config *ScalerConfig, logger logr.Logger) (*a
meta.azureAppInsightsInfo.ClientID = clientID
meta.azureAppInsightsInfo.ClientPassword = clientPassword
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -196,7 +196,7 @@ func (s *azureAppInsightsScaler) Close(context.Context) error {
func (s *azureAppInsightsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-app-insights-%s", s.metadata.azureAppInsightsInfo.MetricID))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-app-insights-%s", s.metadata.azureAppInsightsInfo.MetricID))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/azure_app_insights_scaler_test.go b/pkg/scalers/azure_app_insights_scaler_test.go
index 41500918e11..acdc96ce513 100644
--- a/pkg/scalers/azure_app_insights_scaler_test.go
+++ b/pkg/scalers/azure_app_insights_scaler_test.go
@@ -257,11 +257,11 @@ func TestNewAzureAppInsightsScaler(t *testing.T) {
}
func TestAzureAppInsightsGetMetricSpecForScaling(t *testing.T) {
- scalerIndex := 0
+ triggerIndex := 0
for _, testData := range azureAppInsightsScalerData {
ctx := context.Background()
if !testData.isError {
- testData.config.ScalerIndex = scalerIndex
+ testData.config.TriggerIndex = triggerIndex
meta, err := parseAzureAppInsightsMetadata(&testData.config, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
@@ -273,11 +273,11 @@ func TestAzureAppInsightsGetMetricSpecForScaling(t *testing.T) {
metricSpec := mockAzureAppInsightsScaler.GetMetricSpecForScaling(ctx)
metricName := metricSpec[0].External.Metric.Name
- expectedName := fmt.Sprintf("s%d-azure-app-insights-%s", scalerIndex, strings.ReplaceAll(testData.config.TriggerMetadata["metricId"], "/", "-"))
+ expectedName := fmt.Sprintf("s%d-azure-app-insights-%s", triggerIndex, strings.ReplaceAll(testData.config.TriggerMetadata["metricId"], "/", "-"))
if metricName != expectedName {
t.Errorf("Wrong External metric name. expected: %s, actual: %s", expectedName, metricName)
}
- scalerIndex++
+ triggerIndex++
}
}
}
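The expectedName assertion above also documents the metric-name contract that makes this rename safe: GenerateMetricNameWithIndex still emits an s<index>- prefix, only its argument was renamed. A hypothetical stand-in capturing that behavior (the real helper lives elsewhere in this package):

package scalers

import "fmt"

// metricNameSketch mirrors the format asserted in the test: trigger index 0
// plus "azure-app-insights-foo" yields "s0-azure-app-insights-foo".
func metricNameSketch(triggerIndex int, normalized string) string {
	return fmt.Sprintf("s%d-%s", triggerIndex, normalized)
}

The couchdb identifiers later in this diff ("s0-coucdb-animals", "s1-coucdb-animals") rely on the same prefix.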
diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go
index 55c2b9bcf83..af61f52c269 100644
--- a/pkg/scalers/azure_blob_scaler.go
+++ b/pkg/scalers/azure_blob_scaler.go
@@ -168,7 +168,7 @@ func parseAzureBlobMetadata(config *ScalerConfig, logger logr.Logger) (*azure.Bl
return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage blobs", config.PodIdentity.Provider)
}
- meta.ScalerIndex = config.ScalerIndex
+ meta.TriggerIndex = config.TriggerIndex
return &meta, config.PodIdentity, nil
}
@@ -183,7 +183,7 @@ func (s *azureBlobScaler) Close(context.Context) error {
func (s *azureBlobScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-blob-%s", s.metadata.BlobContainerName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-blob-%s", s.metadata.BlobContainerName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.TargetBlobCount),
}
diff --git a/pkg/scalers/azure_blob_scaler_test.go b/pkg/scalers/azure_blob_scaler_test.go
index abe2bf481e6..de40e5e7995 100644
--- a/pkg/scalers/azure_blob_scaler_test.go
+++ b/pkg/scalers/azure_blob_scaler_test.go
@@ -40,7 +40,7 @@ type parseAzBlobMetadataTestData struct {
type azBlobMetricIdentifier struct {
metadataTestData *parseAzBlobMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -127,7 +127,7 @@ func TestAzBlobGetMetricSpecForScaling(t *testing.T) {
ctx := context.Background()
meta, podIdentity, err := parseAzureBlobMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure_data_explorer_scaler.go b/pkg/scalers/azure_data_explorer_scaler.go
index 9ffef7eeb27..f9c1c31965e 100644
--- a/pkg/scalers/azure_data_explorer_scaler.go
+++ b/pkg/scalers/azure_data_explorer_scaler.go
@@ -118,7 +118,7 @@ func parseAzureDataExplorerMetadata(config *ScalerConfig, logger logr.Logger) (*
}
// Generate metricName.
- metadata.MetricName = GenerateMetricNameWithIndex(config.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s", adxName, metadata.DatabaseName)))
+ metadata.MetricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s", adxName, metadata.DatabaseName)))
activeDirectoryEndpoint, err := azure.ParseActiveDirectoryEndpoint(config.TriggerMetadata)
if err != nil {
diff --git a/pkg/scalers/azure_data_explorer_scaler_test.go b/pkg/scalers/azure_data_explorer_scaler_test.go
index f3ce13b7b59..f6748ea3095 100644
--- a/pkg/scalers/azure_data_explorer_scaler_test.go
+++ b/pkg/scalers/azure_data_explorer_scaler_test.go
@@ -34,7 +34,7 @@ type parseDataExplorerMetadataTestData struct {
type dataExplorerMetricIdentifier struct {
metadataTestData *parseDataExplorerMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -169,7 +169,7 @@ func TestDataExplorerGetMetricSpecForScaling(t *testing.T) {
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: map[string]string{},
PodIdentity: kedav1alpha1.AuthPodIdentity{},
- ScalerIndex: testData.scalerIndex},
+ TriggerIndex: testData.triggerIndex},
logr.Discard())
if err != nil {
t.Errorf("Test case %d: failed to parse metadata: %v", id, err)
diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go
index 25d2ff29e7b..a8a696a872e 100644
--- a/pkg/scalers/azure_eventhub_scaler.go
+++ b/pkg/scalers/azure_eventhub_scaler.go
@@ -61,7 +61,7 @@ type eventHubMetadata struct {
threshold int64
activationThreshold int64
stalePartitionInfoThreshold int64
- scalerIndex int
+ triggerIndex int
}
// NewAzureEventHubScaler creates a new scaler for eventHub
@@ -189,7 +189,7 @@ func parseCommonAzureEventHubMetadata(config *ScalerConfig, meta *eventHubMetada
meta.stalePartitionInfoThreshold = stalePartitionInfoThreshold
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return nil
}
@@ -352,7 +352,7 @@ func GetUnprocessedEventCountWithoutCheckpoint(partitionInfo *eventhub.HubPartit
func (s *azureEventHubScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-eventhub-%s", s.metadata.eventHubInfo.EventHubConsumerGroup))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-eventhub-%s", s.metadata.eventHubInfo.EventHubConsumerGroup))),
},
Target: GetMetricTarget(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/azure_eventhub_scaler_test.go b/pkg/scalers/azure_eventhub_scaler_test.go
index b1e5d678deb..908b17bcf2b 100644
--- a/pkg/scalers/azure_eventhub_scaler_test.go
+++ b/pkg/scalers/azure_eventhub_scaler_test.go
@@ -39,7 +39,7 @@ type parseEventHubMetadataTestData struct {
type eventHubMetricIdentifier struct {
metadataTestData *parseEventHubMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -646,7 +646,7 @@ func DeleteContainerInStorage(ctx context.Context, endpoint *url.URL, credential
func TestEventHubGetMetricSpecForScaling(t *testing.T) {
for _, testData := range eventHubMetricIdentifiers {
- meta, err := parseAzureEventHubMetadata(logr.Discard(), &ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}, ScalerIndex: testData.scalerIndex})
+ meta, err := parseAzureEventHubMetadata(logr.Discard(), &ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go
index 11891647cbc..2079028b544 100644
--- a/pkg/scalers/azure_log_analytics_scaler.go
+++ b/pkg/scalers/azure_log_analytics_scaler.go
@@ -65,7 +65,7 @@ type azureLogAnalyticsMetadata struct {
query string
threshold float64
activationThreshold float64
- scalerIndex int
+ triggerIndex int
logAnalyticsResourceURL string
activeDirectoryEndpoint string
unsafeSsl bool
@@ -204,7 +204,7 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
}
meta.activationThreshold = activationThreshold
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
meta.logAnalyticsResourceURL = defaultLogAnalyticsResourceURL
if cloud, ok := config.TriggerMetadata["cloud"]; ok {
@@ -257,7 +257,7 @@ func getParameterFromConfig(config *ScalerConfig, parameter string, checkAuthPar
func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s", "azure-log-analytics", s.metadata.workspaceID))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s", "azure-log-analytics", s.metadata.workspaceID))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/azure_log_analytics_scaler_test.go b/pkg/scalers/azure_log_analytics_scaler_test.go
index d7e4249b008..6703ba658af 100644
--- a/pkg/scalers/azure_log_analytics_scaler_test.go
+++ b/pkg/scalers/azure_log_analytics_scaler_test.go
@@ -40,7 +40,7 @@ type parseLogAnalyticsMetadataTestData struct {
type LogAnalyticsMetricIdentifier struct {
metadataTestData *parseLogAnalyticsMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -194,7 +194,7 @@ func TestLogAnalyticsGetMetricSpecForScaling(t *testing.T) {
for _, testData := range LogAnalyticsMetricIdentifiers {
meta, err := parseAzureLogAnalyticsMetadata(&ScalerConfig{ResolvedEnv: sampleLogAnalyticsResolvedEnv,
TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil,
- PodIdentity: kedav1alpha1.AuthPodIdentity{}, ScalerIndex: testData.scalerIndex})
+ PodIdentity: kedav1alpha1.AuthPodIdentity{}, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go
index 5f66854ad33..64564babba4 100644
--- a/pkg/scalers/azure_monitor_scaler.go
+++ b/pkg/scalers/azure_monitor_scaler.go
@@ -49,7 +49,7 @@ type azureMonitorMetadata struct {
azureMonitorInfo azure.MonitorInfo
targetValue float64
activationTargetValue float64
- scalerIndex int
+ triggerIndex int
}
// NewAzureMonitorScaler creates a new AzureMonitorScaler
@@ -170,7 +170,7 @@ func parseAzureMonitorMetadata(config *ScalerConfig, logger logr.Logger) (*azure
meta.azureMonitorInfo.ClientID = clientID
meta.azureMonitorInfo.ClientPassword = clientPassword
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
azureResourceManagerEndpointProvider := func(env az.Environment) (string, error) {
return env.ResourceManagerEndpoint, nil
@@ -224,7 +224,7 @@ func (s *azureMonitorScaler) Close(context.Context) error {
func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-monitor-%s", s.metadata.azureMonitorInfo.Name))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-monitor-%s", s.metadata.azureMonitorInfo.Name))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/azure_monitor_scaler_test.go b/pkg/scalers/azure_monitor_scaler_test.go
index 00200c13c01..a328bdada05 100644
--- a/pkg/scalers/azure_monitor_scaler_test.go
+++ b/pkg/scalers/azure_monitor_scaler_test.go
@@ -39,7 +39,7 @@ type parseAzMonitorMetadataTestData struct {
type azMonitorMetricIdentifier struct {
metadataTestData *parseAzMonitorMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -126,7 +126,7 @@ func TestAzMonitorGetMetricSpecForScaling(t *testing.T) {
for _, testData := range azMonitorMetricIdentifiers {
meta, err := parseAzureMonitorMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go
index f30d1805247..7bcdead70b9 100644
--- a/pkg/scalers/azure_pipelines_scaler.go
+++ b/pkg/scalers/azure_pipelines_scaler.go
@@ -135,7 +135,7 @@ type azurePipelinesMetadata struct {
targetPipelinesQueueLength int64
activationTargetPipelinesQueueLength int64
jobsToFetch int64
- scalerIndex int
+ triggerIndex int
requireAllDemands bool
}
@@ -260,7 +260,7 @@ func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, http
// Trim any trailing new lines from the Azure Pipelines PAT
meta.personalAccessToken = strings.TrimSuffix(meta.personalAccessToken, "\n")
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -436,7 +436,7 @@ func getCanAgentParentFulfilJob(jr JobRequest, metadata *azurePipelinesMetadata)
func (s *azurePipelinesScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-pipelines-%d", s.metadata.poolID))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-pipelines-%d", s.metadata.poolID))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetPipelinesQueueLength),
}
diff --git a/pkg/scalers/azure_pipelines_scaler_test.go b/pkg/scalers/azure_pipelines_scaler_test.go
index 99eb81140ef..f333e2a4afb 100644
--- a/pkg/scalers/azure_pipelines_scaler_test.go
+++ b/pkg/scalers/azure_pipelines_scaler_test.go
@@ -134,8 +134,8 @@ func TestValidateAzurePipelinesPool(t *testing.T) {
}
type azurePipelinesMetricIdentifier struct {
- scalerIndex int
- name string
+ triggerIndex int
+ name string
}
var azurePipelinesMetricIdentifiers = []azurePipelinesMetricIdentifier{
@@ -160,7 +160,7 @@ func TestAzurePipelinesGetMetricSpecForScaling(t *testing.T) {
"targetPipelinesQueueLength": "1",
}
- meta, err := parseAzurePipelinesMetadata(context.TODO(), &ScalerConfig{TriggerMetadata: metadata, ResolvedEnv: nil, AuthParams: authParams, ScalerIndex: testData.scalerIndex}, http.DefaultClient)
+ meta, err := parseAzurePipelinesMetadata(context.TODO(), &ScalerConfig{TriggerMetadata: metadata, ResolvedEnv: nil, AuthParams: authParams, TriggerIndex: testData.triggerIndex}, http.DefaultClient)
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go
index 5551fec283b..d77d7f08db7 100644
--- a/pkg/scalers/azure_queue_scaler.go
+++ b/pkg/scalers/azure_queue_scaler.go
@@ -53,7 +53,7 @@ type azureQueueMetadata struct {
connection string
accountName string
endpointSuffix string
- scalerIndex int
+ triggerIndex int
}
// NewAzureQueueScaler creates a new scaler for queue
@@ -153,7 +153,7 @@ func parseAzureQueueMetadata(config *ScalerConfig, logger logr.Logger) (*azureQu
return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage queues", config.PodIdentity.Provider)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, config.PodIdentity, nil
}
@@ -168,7 +168,7 @@ func (s *azureQueueScaler) Close(context.Context) error {
func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-queue-%s", s.metadata.queueName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-queue-%s", s.metadata.queueName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetQueueLength),
}
diff --git a/pkg/scalers/azure_queue_scaler_test.go b/pkg/scalers/azure_queue_scaler_test.go
index d95560e1283..948198ab4fa 100644
--- a/pkg/scalers/azure_queue_scaler_test.go
+++ b/pkg/scalers/azure_queue_scaler_test.go
@@ -40,7 +40,7 @@ type parseAzQueueMetadataTestData struct {
type azQueueMetricIdentifier struct {
metadataTestData *parseAzQueueMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -124,7 +124,7 @@ func TestAzQueueGetMetricSpecForScaling(t *testing.T) {
for _, testData := range azQueueMetricIdentifiers {
meta, podIdentity, err := parseAzureQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex},
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex},
logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go
index 6c9d58894af..861c47c1558 100755
--- a/pkg/scalers/azure_servicebus_scaler.go
+++ b/pkg/scalers/azure_servicebus_scaler.go
@@ -65,7 +65,7 @@ type azureServiceBusMetadata struct {
useRegex bool
entityNameRegex *regexp.Regexp
operation string
- scalerIndex int
+ triggerIndex int
}
// NewAzureServiceBusScaler creates a new AzureServiceBusScaler
@@ -217,7 +217,7 @@ func parseAzureServiceBusMetadata(config *ScalerConfig, logger logr.Logger) (*az
return nil, fmt.Errorf("azure service bus doesn't support pod identity %s", config.PodIdentity.Provider)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -246,7 +246,7 @@ func (s *azureServiceBusScaler) GetMetricSpecForScaling(context.Context) []v2.Me
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-servicebus-%s", metricName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-servicebus-%s", metricName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetLength),
}
diff --git a/pkg/scalers/azure_servicebus_scaler_test.go b/pkg/scalers/azure_servicebus_scaler_test.go
index 72b36f9e6ed..7e76d6632c1 100755
--- a/pkg/scalers/azure_servicebus_scaler_test.go
+++ b/pkg/scalers/azure_servicebus_scaler_test.go
@@ -49,7 +49,7 @@ type parseServiceBusMetadataTestData struct {
type azServiceBusMetricIdentifier struct {
metadataTestData *parseServiceBusMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -169,7 +169,7 @@ func TestParseServiceBusMetadata(t *testing.T) {
for index, testData := range parseServiceBusMetadataDataset {
meta, err := parseAzureServiceBusMetadata(&ScalerConfig{ResolvedEnv: sampleResolvedEnv,
TriggerMetadata: testData.metadata, AuthParams: testData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}, ScalerIndex: 0},
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}, TriggerIndex: 0},
logr.Discard())
if err != nil && !testData.isError {
@@ -194,7 +194,7 @@ func TestGetServiceBusAdminClientIsCached(t *testing.T) {
testData := azServiceBusMetricIdentifiers[0]
meta, err := parseAzureServiceBusMetadata(&ScalerConfig{ResolvedEnv: connectionResolvedEnv,
TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex},
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex},
logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
@@ -244,7 +244,7 @@ func TestAzServiceBusGetMetricSpecForScaling(t *testing.T) {
for _, testData := range azServiceBusMetricIdentifiers {
meta, err := parseAzureServiceBusMetadata(&ScalerConfig{ResolvedEnv: connectionResolvedEnv,
TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams,
- PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex},
+ PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex},
logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go
index c1a52a48bb8..fc367473ed0 100644
--- a/pkg/scalers/cassandra_scaler.go
+++ b/pkg/scalers/cassandra_scaler.go
@@ -35,7 +35,7 @@ type CassandraMetadata struct {
query string
targetQueryValue int64
activationTargetQueryValue int64
- scalerIndex int
+ triggerIndex int
}
// NewCassandraScaler creates a new Cassandra scaler.
@@ -156,7 +156,7 @@ func parseCassandraMetadata(config *ScalerConfig) (*CassandraMetadata, error) {
return nil, fmt.Errorf("no password given")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -184,7 +184,7 @@ func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Se
func (s *cassandraScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", s.metadata.keyspace))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", s.metadata.keyspace))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetQueryValue),
}
diff --git a/pkg/scalers/cassandra_scaler_test.go b/pkg/scalers/cassandra_scaler_test.go
index 816e82dc9ee..b791efbf08e 100644
--- a/pkg/scalers/cassandra_scaler_test.go
+++ b/pkg/scalers/cassandra_scaler_test.go
@@ -16,7 +16,7 @@ type parseCassandraMetadataTestData struct {
type cassandraMetricIdentifier struct {
metadataTestData *parseCassandraMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -24,25 +24,25 @@ var testCassandraMetadata = []parseCassandraMetadataTestData{
// nothing passed
{map[string]string{}, true, map[string]string{}},
// everything is passed in verbatim
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// metricName is generated from keyspace
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no query passed
- {map[string]string{"targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no targetQueryValue passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no username passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no port passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no clusterIPAddress passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no keyspace passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no password passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{}},
// fix issue[4110] passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "https://cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "https://cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
}
var cassandraMetricIdentifiers = []cassandraMetricIdentifier{
@@ -66,7 +66,7 @@ func TestCassandraParseMetadata(t *testing.T) {
func TestCassandraGetMetricSpecForScaling(t *testing.T) {
for _, testData := range cassandraMetricIdentifiers {
- meta, err := parseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex, AuthParams: testData.metadataTestData.authParams})
+ meta, err := parseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex, AuthParams: testData.metadataTestData.authParams})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/couchdb_scaler.go b/pkg/scalers/couchdb_scaler.go
index 3025e807ec7..8a851be1af8 100644
--- a/pkg/scalers/couchdb_scaler.go
+++ b/pkg/scalers/couchdb_scaler.go
@@ -38,7 +38,7 @@ type couchDBMetadata struct {
query string
queryValue int64
activationQueryValue int64
- scalerIndex int
+ triggerIndex int
}
type Res struct {
@@ -50,7 +50,7 @@ type Res struct {
func (s *couchDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", s.metadata.dbName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", s.metadata.dbName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queryValue),
}
@@ -179,7 +179,7 @@ func parseCouchDBMetadata(config *ScalerConfig) (*couchDBMetadata, string, error
// nosemgrep: db-connection-string
connStr = "http://" + addr
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, connStr, nil
}
diff --git a/pkg/scalers/couchdb_scaler_test.go b/pkg/scalers/couchdb_scaler_test.go
index 8abde9b0797..c7b69aa7157 100644
--- a/pkg/scalers/couchdb_scaler_test.go
+++ b/pkg/scalers/couchdb_scaler_test.go
@@ -23,7 +23,7 @@ type parseCouchDBMetadataTestData struct {
type couchDBMetricIdentifier struct {
metadataTestData *parseCouchDBMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -66,8 +66,8 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{
}
var couchDBMetricIdentifiers = []couchDBMetricIdentifier{
- {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 0, name: "s0-coucdb-animals"},
- {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 1, name: "s1-coucdb-animals"},
+ {metadataTestData: &testCOUCHDBMetadata[2], triggerIndex: 0, name: "s0-coucdb-animals"},
+ {metadataTestData: &testCOUCHDBMetadata[2], triggerIndex: 1, name: "s1-coucdb-animals"},
}
func TestParseCouchDBMetadata(t *testing.T) {
@@ -81,7 +81,7 @@ func TestParseCouchDBMetadata(t *testing.T) {
func TestCouchDBGetMetricSpecForScaling(t *testing.T) {
for _, testData := range couchDBMetricIdentifiers {
- meta, _, err := parseCouchDBMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, _, err := parseCouchDBMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/cron_scaler.go b/pkg/scalers/cron_scaler.go
index 9bb269eaf6f..f1696ddff16 100644
--- a/pkg/scalers/cron_scaler.go
+++ b/pkg/scalers/cron_scaler.go
@@ -31,7 +31,7 @@ type cronMetadata struct {
end string
timezone string
desiredReplicas int64
- scalerIndex int
+ triggerIndex int
}
// NewCronScaler creates a new cronScaler
@@ -110,7 +110,7 @@ func parseCronMetadata(config *ScalerConfig) (*cronMetadata, error) {
} else {
return nil, fmt.Errorf("no DesiredReplicas specified. %s", config.TriggerMetadata)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -131,7 +131,7 @@ func (s *cronScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
var specReplicas int64 = 1
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("cron-%s-%s-%s", s.metadata.timezone, parseCronTimeFormat(s.metadata.start), parseCronTimeFormat(s.metadata.end)))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("cron-%s-%s-%s", s.metadata.timezone, parseCronTimeFormat(s.metadata.start), parseCronTimeFormat(s.metadata.end)))),
},
Target: GetMetricTarget(s.metricType, specReplicas),
}
diff --git a/pkg/scalers/cron_scaler_test.go b/pkg/scalers/cron_scaler_test.go
index b0643186d65..937a17b865c 100644
--- a/pkg/scalers/cron_scaler_test.go
+++ b/pkg/scalers/cron_scaler_test.go
@@ -16,7 +16,7 @@ type parseCronMetadataTestData struct {
type cronMetricIdentifier struct {
metadataTestData *parseCronMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -115,7 +115,7 @@ func TestGetMetricsRange(t *testing.T) {
func TestCronGetMetricSpecForScaling(t *testing.T) {
for _, testData := range cronMetricIdentifiers {
- meta, err := parseCronMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseCronMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go
index c018877201e..f53f9faf939 100644
--- a/pkg/scalers/datadog_scaler.go
+++ b/pkg/scalers/datadog_scaler.go
@@ -223,7 +223,7 @@ func parseDatadogMetadata(config *ScalerConfig, logger logr.Logger) (*datadogMet
meta.datadogSite = siteVal
metricName := meta.query[0:strings.Index(meta.query, "{")]
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("datadog-%s", metricName)))
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("datadog-%s", metricName)))
return &meta, nil
}
diff --git a/pkg/scalers/datadog_scaler_test.go b/pkg/scalers/datadog_scaler_test.go
index e5f8bcec95e..382476e6439 100644
--- a/pkg/scalers/datadog_scaler_test.go
+++ b/pkg/scalers/datadog_scaler_test.go
@@ -16,7 +16,7 @@ type datadogQueries struct {
type datadogMetricIdentifier struct {
metadataTestData *datadogAuthMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -153,7 +153,7 @@ var datadogMetricIdentifiers = []datadogMetricIdentifier{
func TestDatadogGetMetricSpecForScaling(t *testing.T) {
for _, testData := range datadogMetricIdentifiers {
- meta, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex, MetricType: testData.metadataTestData.metricType}, logr.Discard())
+ meta, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex, MetricType: testData.metadataTestData.metricType}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/elasticsearch_scaler.go b/pkg/scalers/elasticsearch_scaler.go
index 6e72ea4ce5b..9a806e1b47f 100644
--- a/pkg/scalers/elasticsearch_scaler.go
+++ b/pkg/scalers/elasticsearch_scaler.go
@@ -220,7 +220,7 @@ func parseElasticsearchMetadata(config *ScalerConfig) (*elasticsearchMetadata, e
meta.activationTargetValue = activationTargetValue
}
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, util.NormalizeString(fmt.Sprintf("elasticsearch-%s", meta.searchTemplateName)))
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, util.NormalizeString(fmt.Sprintf("elasticsearch-%s", meta.searchTemplateName)))
return &meta, nil
}
diff --git a/pkg/scalers/elasticsearch_scaler_test.go b/pkg/scalers/elasticsearch_scaler_test.go
index ac2100491d8..467ac6a6ade 100644
--- a/pkg/scalers/elasticsearch_scaler_test.go
+++ b/pkg/scalers/elasticsearch_scaler_test.go
@@ -27,7 +27,7 @@ type paramsTestData struct {
type elasticsearchMetricIdentifier struct {
metadataTestData *parseElasticsearchMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -457,7 +457,7 @@ func TestElasticsearchGetMetricSpecForScaling(t *testing.T) {
meta, err := parseElasticsearchMetadata(&ScalerConfig{
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: testData.metadataTestData.authParams,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
})
if testData.metadataTestData.expectedError != nil {
assert.ErrorIs(t, err, testData.metadataTestData.expectedError)
diff --git a/pkg/scalers/etcd_scaler.go b/pkg/scalers/etcd_scaler.go
index 2c4ad59107a..25d9c750c2a 100644
--- a/pkg/scalers/etcd_scaler.go
+++ b/pkg/scalers/etcd_scaler.go
@@ -42,7 +42,7 @@ type etcdMetadata struct {
value float64
activationValue float64
watchProgressNotifyInterval int
- scalerIndex int
+ triggerIndex int
// TLS
enableTLS bool
cert string
@@ -146,7 +146,7 @@ func parseEtcdMetadata(config *ScalerConfig) (*etcdMetadata, error) {
return meta, err
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -195,7 +195,7 @@ func (s *etcdScaler) GetMetricsAndActivity(ctx context.Context, metricName strin
func (s *etcdScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("etcd-%s", s.metadata.watchKey))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("etcd-%s", s.metadata.watchKey))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
diff --git a/pkg/scalers/etcd_scaler_test.go b/pkg/scalers/etcd_scaler_test.go
index 24da26c31ad..768a4e156d6 100644
--- a/pkg/scalers/etcd_scaler_test.go
+++ b/pkg/scalers/etcd_scaler_test.go
@@ -22,7 +22,7 @@ type parseEtcdAuthParamsTestData struct {
type etcdMetricIdentifier struct {
metadataTestData *parseEtcdMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -121,7 +121,7 @@ func TestParseEtcdAuthParams(t *testing.T) {
func TestEtcdGetMetricSpecForScaling(t *testing.T) {
for _, testData := range etcdMetricIdentifiers {
- meta, err := parseEtcdMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseEtcdMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go
index 78bbf5dea8b..6b8ab00575d 100644
--- a/pkg/scalers/external_scaler.go
+++ b/pkg/scalers/external_scaler.go
@@ -36,7 +36,7 @@ type externalScalerMetadata struct {
scalerAddress string
tlsCertFile string
originalMetadata map[string]string
- scalerIndex int
+ triggerIndex int
caCert string
tlsClientCert string
tlsClientKey string
@@ -151,7 +151,7 @@ func parseExternalScalerMetadata(config *ScalerConfig) (externalScalerMetadata,
meta.originalMetadata[key] = value
}
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -178,7 +178,7 @@ func (s *externalScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metri
for _, spec := range response.MetricSpecs {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, spec.MetricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, spec.MetricName),
},
Target: GetMetricTarget(s.metricType, spec.TargetSize),
}
@@ -204,7 +204,7 @@ func (s *externalScaler) GetMetricsAndActivity(ctx context.Context, metricName s
}
// Remove the sX- prefix as the external scaler shouldn't have to know about it
- metricNameWithoutIndex, err := RemoveIndexFromMetricName(s.metadata.scalerIndex, metricName)
+ metricNameWithoutIndex, err := RemoveIndexFromMetricName(s.metadata.triggerIndex, metricName)
if err != nil {
return []external_metrics.ExternalMetricValue{}, false, err
}
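
The external scaler is the one place the index prefix must also be stripped again before a metric name crosses the gRPC boundary, per the comment in the hunk above. A minimal sketch of the round trip, assuming the helpers keep the s<index>- prefix scheme that the s0-/s1- test expectations elsewhere in this diff imply; the lowercase function names are illustrative stand-ins for GenerateMetricNameWithIndex and RemoveIndexFromMetricName:

package main

import (
	"fmt"
	"strings"
)

// generateMetricNameWithIndex prepends the trigger index as an "s<index>-"
// prefix so two triggers on one ScaledObject cannot collide on metric names.
func generateMetricNameWithIndex(triggerIndex int, metricName string) string {
	return fmt.Sprintf("s%d-%s", triggerIndex, metricName)
}

// removeIndexFromMetricName strips that prefix again, since the external
// scaler protocol should only ever see the bare metric name.
func removeIndexFromMetricName(triggerIndex int, metricName string) (string, error) {
	prefix := fmt.Sprintf("s%d-", triggerIndex)
	trimmed := strings.TrimPrefix(metricName, prefix)
	if trimmed == metricName {
		return "", fmt.Errorf("metric name %q does not start with %q", metricName, prefix)
	}
	return trimmed, nil
}

func main() {
	name := generateMetricNameWithIndex(1, "external-queue-depth")
	fmt.Println(name) // s1-external-queue-depth

	bare, err := removeIndexFromMetricName(1, name)
	if err != nil {
		panic(err)
	}
	fmt.Println(bare) // external-queue-depth
}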
diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go
index 39ddf2b34e6..b67c09f0875 100644
--- a/pkg/scalers/externalscaler/externalscaler.pb.go
+++ b/pkg/scalers/externalscaler/externalscaler.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.23.2
+// protoc-gen-go v1.31.0
+// protoc v4.23.4
// source: externalscaler.proto
package externalscaler
diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go
index 5489ae58ac4..e9944342368 100644
--- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go
+++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.23.2
+// - protoc v4.23.4
// source: externalscaler.proto
package externalscaler
diff --git a/pkg/scalers/gcp_cloud_tasks_scaler.go b/pkg/scalers/gcp_cloud_tasks_scaler.go
index dee2cc38798..5eaf734e737 100644
--- a/pkg/scalers/gcp_cloud_tasks_scaler.go
+++ b/pkg/scalers/gcp_cloud_tasks_scaler.go
@@ -32,7 +32,7 @@ type gcpCloudTaskMetadata struct {
queueName string
projectID string
gcpAuthorization *gcpAuthorizationMetadata
- scalerIndex int
+ triggerIndex int
}
// NewGcpCloudTasksScaler creates a new cloudTaskScaler
@@ -102,7 +102,7 @@ func parseGcpCloudTasksMetadata(config *ScalerConfig) (*gcpCloudTaskMetadata, er
return nil, err
}
meta.gcpAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -122,7 +122,7 @@ func (s *gcpCloudTasksScaler) Close(context.Context) error {
func (s *gcpCloudTasksScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("gcp-ct-%s", s.metadata.queueName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("gcp-ct-%s", s.metadata.queueName))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
diff --git a/pkg/scalers/gcp_cloud_tasks_scaler_test.go b/pkg/scalers/gcp_cloud_tasks_scaler_test.go
index a5801ef2456..6fd20234eab 100644
--- a/pkg/scalers/gcp_cloud_tasks_scaler_test.go
+++ b/pkg/scalers/gcp_cloud_tasks_scaler_test.go
@@ -19,7 +19,7 @@ type parseGcpCloudTasksMetadataTestData struct {
type gcpCloudTasksMetricIdentifier struct {
metadataTestData *parseGcpCloudTasksMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -64,7 +64,7 @@ func TestGcpCloudTasksParseMetadata(t *testing.T) {
func TestGcpCloudTasksGetMetricSpecForScaling(t *testing.T) {
for _, testData := range gcpCloudTasksMetricIdentifiers {
- meta, err := parseGcpCloudTasksMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcpCloudTasksResolvedEnv, ScalerIndex: testData.scalerIndex})
+ meta, err := parseGcpCloudTasksMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcpCloudTasksResolvedEnv, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go
index bb0df757899..aee3f341831 100644
--- a/pkg/scalers/gcp_pubsub_scaler.go
+++ b/pkg/scalers/gcp_pubsub_scaler.go
@@ -44,7 +44,7 @@ type pubsubMetadata struct {
resourceType string
resourceName string
gcpAuthorization *gcpAuthorizationMetadata
- scalerIndex int
+ triggerIndex int
aggregation string
}
@@ -140,7 +140,7 @@ func parsePubSubMetadata(config *ScalerConfig, logger logr.Logger) (*pubsubMetad
return nil, err
}
meta.gcpAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -160,7 +160,7 @@ func (s *pubsubScaler) Close(context.Context) error {
func (s *pubsubScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("gcp-ps-%s", s.metadata.resourceName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("gcp-ps-%s", s.metadata.resourceName))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
diff --git a/pkg/scalers/gcp_pubsub_scaler_test.go b/pkg/scalers/gcp_pubsub_scaler_test.go
index 13349073fa1..3a8c05665fc 100644
--- a/pkg/scalers/gcp_pubsub_scaler_test.go
+++ b/pkg/scalers/gcp_pubsub_scaler_test.go
@@ -19,13 +19,13 @@ type parsePubSubMetadataTestData struct {
type gcpPubSubMetricIdentifier struct {
metadataTestData *parsePubSubMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
type gcpPubSubSubscription struct {
metadataTestData *parsePubSubMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
projectID string
}
@@ -124,7 +124,7 @@ func TestPubSubMetadataDefaultValues(t *testing.T) {
func TestGcpPubSubGetMetricSpecForScaling(t *testing.T) {
for _, testData := range gcpPubSubMetricIdentifiers {
- meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -140,7 +140,7 @@ func TestGcpPubSubGetMetricSpecForScaling(t *testing.T) {
func TestGcpPubSubSubscriptionName(t *testing.T) {
for _, testData := range gcpResourceNameTests {
- meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/gcp_stackdriver_scaler.go b/pkg/scalers/gcp_stackdriver_scaler.go
index 1b63bd09cb5..4e01ba327ea 100644
--- a/pkg/scalers/gcp_stackdriver_scaler.go
+++ b/pkg/scalers/gcp_stackdriver_scaler.go
@@ -89,7 +89,7 @@ func parseStackdriverMetadata(config *ScalerConfig, logger logr.Logger) (*stackd
}
name := kedautil.NormalizeString(fmt.Sprintf("gcp-stackdriver-%s", meta.projectID))
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, name)
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, name)
if val, ok := config.TriggerMetadata["targetValue"]; ok {
targetValue, err := strconv.ParseFloat(val, 64)
diff --git a/pkg/scalers/gcp_stackdriver_scaler_test.go b/pkg/scalers/gcp_stackdriver_scaler_test.go
index f20bd3e4250..80629ea21b2 100644
--- a/pkg/scalers/gcp_stackdriver_scaler_test.go
+++ b/pkg/scalers/gcp_stackdriver_scaler_test.go
@@ -19,7 +19,7 @@ type parseStackdriverMetadataTestData struct {
type gcpStackdriverMetricIdentifier struct {
metadataTestData *parseStackdriverMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -80,7 +80,7 @@ func TestStackdriverParseMetadata(t *testing.T) {
func TestGcpStackdriverGetMetricSpecForScaling(t *testing.T) {
for _, testData := range gcpStackdriverMetricIdentifiers {
- meta, err := parseStackdriverMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testStackdriverResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseStackdriverMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testStackdriverResolvedEnv, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/gcp_storage_scaler.go b/pkg/scalers/gcp_storage_scaler.go
index 0677a17c1a6..07848014b07 100644
--- a/pkg/scalers/gcp_storage_scaler.go
+++ b/pkg/scalers/gcp_storage_scaler.go
@@ -152,7 +152,7 @@ func parseGcsMetadata(config *ScalerConfig, logger logr.Logger) (*gcsMetadata, e
meta.gcpAuthorization = auth
var metricName = kedautil.NormalizeString(fmt.Sprintf("gcp-storage-%s", meta.bucketName))
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, metricName)
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, metricName)
return &meta, nil
}
diff --git a/pkg/scalers/gcp_storage_scaler_test.go b/pkg/scalers/gcp_storage_scaler_test.go
index b353d543ac1..fe27f3ba25c 100644
--- a/pkg/scalers/gcp_storage_scaler_test.go
+++ b/pkg/scalers/gcp_storage_scaler_test.go
@@ -19,7 +19,7 @@ type parseGcsMetadataTestData struct {
type gcpGcsMetricIdentifier struct {
metadataTestData *parseGcsMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -64,7 +64,7 @@ func TestGcsParseMetadata(t *testing.T) {
func TestGcsGetMetricSpecForScaling(t *testing.T) {
for _, testData := range gcpGcsMetricIdentifiers {
- meta, err := parseGcsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcsResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseGcsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcsResolvedEnv, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go
index 9e006d175f4..bba7b388454 100644
--- a/pkg/scalers/github_runner_scaler.go
+++ b/pkg/scalers/github_runner_scaler.go
@@ -43,7 +43,7 @@ type githubRunnerMetadata struct {
repos []string
labels []string
targetWorkflowQueueLength int64
- scalerIndex int
+ triggerIndex int
applicationID *int64
installationID *int64
applicationKey *string
@@ -432,7 +432,7 @@ func parseGitHubRunnerMetadata(config *ScalerConfig) (*githubRunnerMetadata, err
return nil, fmt.Errorf("no personalAccessToken or appKey given")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -661,7 +661,7 @@ func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricNa
func (s *githubRunnerScaler) GetMetricSpecForScaling(_ context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("github-runner-%s", s.metadata.owner))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("github-runner-%s", s.metadata.owner))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetWorkflowQueueLength),
}
diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go
index d09b1f79ed5..2ab6f320795 100644
--- a/pkg/scalers/github_runner_scaler_test.go
+++ b/pkg/scalers/github_runner_scaler_test.go
@@ -555,7 +555,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledRepos_NoRate(t *testi
type githubRunnerMetricIdentifier struct {
metadataTestData *map[string]string
- scalerIndex int
+ triggerIndex int
name string
}
@@ -567,7 +567,7 @@ var githubRunnerMetricIdentifiers = []githubRunnerMetricIdentifier{
func TestGithubRunnerGetMetricSpecForScaling(t *testing.T) {
for i, testData := range githubRunnerMetricIdentifiers {
ctx := context.Background()
- meta, err := parseGitHubRunnerMetadata(&ScalerConfig{ResolvedEnv: testGitHubRunnerResolvedEnv, TriggerMetadata: *testData.metadataTestData, AuthParams: testAuthParams, ScalerIndex: testData.scalerIndex})
+ meta, err := parseGitHubRunnerMetadata(&ScalerConfig{ResolvedEnv: testGitHubRunnerResolvedEnv, TriggerMetadata: *testData.metadataTestData, AuthParams: testAuthParams, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/graphite_scaler.go b/pkg/scalers/graphite_scaler.go
index 37e54282cfa..3789cdf3912 100644
--- a/pkg/scalers/graphite_scaler.go
+++ b/pkg/scalers/graphite_scaler.go
@@ -44,7 +44,7 @@ type graphiteMetadata struct {
enableBasicAuth bool
username string
password string // +optional
- scalerIndex int
+ triggerIndex int
}
type grapQueryResult []struct {
@@ -116,7 +116,7 @@ func parseGraphiteMetadata(config *ScalerConfig) (*graphiteMetadata, error) {
meta.activationThreshold = t
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
val, ok := config.TriggerMetadata["authMode"]
// no authMode specified
@@ -150,7 +150,7 @@ func (s *graphiteScaler) Close(context.Context) error {
func (s *graphiteScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "graphite"),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "graphite"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/graphite_scaler_test.go b/pkg/scalers/graphite_scaler_test.go
index fc30c8671c5..2f855ab6a32 100644
--- a/pkg/scalers/graphite_scaler_test.go
+++ b/pkg/scalers/graphite_scaler_test.go
@@ -17,7 +17,7 @@ type parseGraphiteMetadataTestData struct {
type graphiteMetricIdentifier struct {
metadataTestData *parseGraphiteMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -125,7 +125,7 @@ func TestGraphiteParseMetadata(t *testing.T) {
func TestGraphiteGetMetricSpecForScaling(t *testing.T) {
for _, testData := range graphiteMetricIdentifiers {
ctx := context.Background()
- meta, err := parseGraphiteMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseGraphiteMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/huawei_cloudeye_scaler.go b/pkg/scalers/huawei_cloudeye_scaler.go
index 4f13a10d6c7..18af51e120f 100644
--- a/pkg/scalers/huawei_cloudeye_scaler.go
+++ b/pkg/scalers/huawei_cloudeye_scaler.go
@@ -46,7 +46,7 @@ type huaweiCloudeyeMetadata struct {
huaweiAuthorization huaweiAuthorizationMetadata
- scalerIndex int
+ triggerIndex int
}
type huaweiAuthorizationMetadata struct {
@@ -182,7 +182,7 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig, logger logr.Logger) (*hua
}
meta.huaweiAuthorization = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -255,7 +255,7 @@ func (s *huaweiCloudeyeScaler) GetMetricsAndActivity(_ context.Context, metricNa
func (s *huaweiCloudeyeScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("huawei-cloudeye-%s", s.metadata.metricsName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("huawei-cloudeye-%s", s.metadata.metricsName))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetMetricValue),
}
diff --git a/pkg/scalers/huawei_cloudeye_scaler_test.go b/pkg/scalers/huawei_cloudeye_scaler_test.go
index 7d59e706ae0..3359c3bf305 100644
--- a/pkg/scalers/huawei_cloudeye_scaler_test.go
+++ b/pkg/scalers/huawei_cloudeye_scaler_test.go
@@ -27,7 +27,7 @@ type parseHuaweiCloudeyeMetadataTestData struct {
type huaweiCloudeyeMetricIdentifier struct {
metadataTestData *parseHuaweiCloudeyeMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -172,7 +172,7 @@ func TestHuaweiCloudeyeParseMetadata(t *testing.T) {
func TestHuaweiCloudeyeGetMetricSpecForScaling(t *testing.T) {
for _, testData := range huaweiCloudeyeMetricIdentifiers {
- meta, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go
index 15591524d37..6be263250fc 100644
--- a/pkg/scalers/ibmmq_scaler.go
+++ b/pkg/scalers/ibmmq_scaler.go
@@ -42,7 +42,7 @@ type IBMMQMetadata struct {
queueDepth int64
activationQueueDepth int64
tlsDisabled bool
- scalerIndex int
+ triggerIndex int
}
// CommandResponse Full structured response from MQ admin REST query
@@ -159,7 +159,7 @@ func parseIBMMQMetadata(config *ScalerConfig) (*IBMMQMetadata, error) {
default:
return nil, fmt.Errorf("no password given")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -206,7 +206,7 @@ func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) {
func (s *IBMMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("ibmmq-%s", s.metadata.queueName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("ibmmq-%s", s.metadata.queueName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queueDepth),
}
diff --git a/pkg/scalers/ibmmq_scaler_test.go b/pkg/scalers/ibmmq_scaler_test.go
index 1e5ee298c37..7a766d4ef6b 100644
--- a/pkg/scalers/ibmmq_scaler_test.go
+++ b/pkg/scalers/ibmmq_scaler_test.go
@@ -28,7 +28,7 @@ var sampleIBMMQResolvedEnv = map[string]string{
// Test metric identifier with test MQ data and its name
type IBMMQMetricIdentifier struct {
metadataTestData *parseIBMMQMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -108,7 +108,7 @@ func TestParseDefaultQueueDepth(t *testing.T) {
// Create a scaler and check if metrics method is available
func TestIBMMQGetMetricSpecForScaling(t *testing.T) {
for _, testData := range IBMMQMetricIdentifiers {
- metadata, err := parseIBMMQMetadata(&ScalerConfig{ResolvedEnv: sampleIBMMQResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+ metadata, err := parseIBMMQMetadata(&ScalerConfig{ResolvedEnv: sampleIBMMQResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex})
httpTimeout := 100 * time.Millisecond
if err != nil {
diff --git a/pkg/scalers/influxdb_scaler.go b/pkg/scalers/influxdb_scaler.go
index 8522607cee3..04ad173e9f8 100644
--- a/pkg/scalers/influxdb_scaler.go
+++ b/pkg/scalers/influxdb_scaler.go
@@ -29,7 +29,7 @@ type influxDBMetadata struct {
unsafeSsl bool
thresholdValue float64
activationThresholdValue float64
- scalerIndex int
+ triggerIndex int
}
// NewInfluxDBScaler creates a new influx db scaler
@@ -154,7 +154,7 @@ func parseInfluxDBMetadata(config *ScalerConfig) (*influxDBMetadata, error) {
thresholdValue: thresholdValue,
activationThresholdValue: activationThresholdValue,
unsafeSsl: unsafeSsl,
- scalerIndex: config.ScalerIndex,
+ triggerIndex: config.TriggerIndex,
}, nil
}
@@ -207,7 +207,7 @@ func (s *influxDBScaler) GetMetricsAndActivity(ctx context.Context, metricName s
func (s *influxDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, util.NormalizeString(fmt.Sprintf("influxdb-%s", s.metadata.organizationName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, util.NormalizeString(fmt.Sprintf("influxdb-%s", s.metadata.organizationName))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.thresholdValue),
}
diff --git a/pkg/scalers/influxdb_scaler_test.go b/pkg/scalers/influxdb_scaler_test.go
index dcbab435664..73c88b66d03 100644
--- a/pkg/scalers/influxdb_scaler_test.go
+++ b/pkg/scalers/influxdb_scaler_test.go
@@ -21,7 +21,7 @@ type parseInfluxDBMetadataTestData struct {
type influxDBMetricIdentifier struct {
metadataTestData *parseInfluxDBMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -71,7 +71,7 @@ func TestInfluxDBParseMetadata(t *testing.T) {
func TestInfluxDBGetMetricSpecForScaling(t *testing.T) {
for _, testData := range influxDBMetricIdentifiers {
- meta, err := parseInfluxDBMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testInfluxDBResolvedEnv, ScalerIndex: testData.scalerIndex})
+ meta, err := parseInfluxDBMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testInfluxDBResolvedEnv, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go
index 02b18db6e84..6d593b513fd 100644
--- a/pkg/scalers/kafka_scaler.go
+++ b/pkg/scalers/kafka_scaler.go
@@ -90,7 +90,7 @@ type kafkaMetadata struct {
ca string
unsafeSsl bool
- scalerIndex int
+ triggerIndex int
}
type offsetResetPolicy string
@@ -494,7 +494,7 @@ func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata
}
meta.version = version
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -740,7 +740,7 @@ func (s *kafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(metricName)),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
diff --git a/pkg/scalers/kafka_scaler_test.go b/pkg/scalers/kafka_scaler_test.go
index 5ff3274103d..311a9805c6b 100644
--- a/pkg/scalers/kafka_scaler_test.go
+++ b/pkg/scalers/kafka_scaler_test.go
@@ -43,7 +43,7 @@ type parseAuthParamsTestDataSecondAuthMethod struct {
type kafkaMetricIdentifier struct {
metadataTestData *parseKafkaMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -512,7 +512,7 @@ func TestKafkaOAuthbrearerAuthParams(t *testing.T) {
func TestKafkaGetMetricSpecForScaling(t *testing.T) {
for _, testData := range kafkaMetricIdentifiers {
- meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validWithAuthParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validWithAuthParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go
index 3e2bc8fca91..bb35b17ba56 100644
--- a/pkg/scalers/kubernetes_workload_scaler.go
+++ b/pkg/scalers/kubernetes_workload_scaler.go
@@ -39,7 +39,7 @@ type kubernetesWorkloadMetadata struct {
namespace string
value float64
activationValue float64
- scalerIndex int
+ triggerIndex int
}
// NewKubernetesWorkloadScaler creates a new kubernetesWorkloadScaler
@@ -90,7 +90,7 @@ func parseWorkloadMetadata(config *ScalerConfig) (*kubernetesWorkloadMetadata, e
meta.activationValue = activationValue
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -103,7 +103,7 @@ func (s *kubernetesWorkloadScaler) Close(context.Context) error {
func (s *kubernetesWorkloadScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("workload-%s", s.metadata.namespace))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("workload-%s", s.metadata.namespace))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
diff --git a/pkg/scalers/kubernetes_workload_scaler_test.go b/pkg/scalers/kubernetes_workload_scaler_test.go
index a015a77c3ba..dc83fae3d09 100644
--- a/pkg/scalers/kubernetes_workload_scaler_test.go
+++ b/pkg/scalers/kubernetes_workload_scaler_test.go
@@ -86,10 +86,10 @@ func TestWorkloadIsActive(t *testing.T) {
}
type workloadGetMetricSpecForScalingTestData struct {
- metadata map[string]string
- namespace string
- scalerIndex int
- name string
+ metadata map[string]string
+ namespace string
+ triggerIndex int
+ name string
}
var getMetricSpecForScalingTestDataset = []workloadGetMetricSpecForScalingTestData{
@@ -112,7 +112,7 @@ func TestWorkloadGetMetricSpecForScaling(t *testing.T) {
AuthParams: map[string]string{},
GlobalHTTPTimeout: 1000 * time.Millisecond,
ScalableObjectNamespace: testData.namespace,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
},
)
metric := s.GetMetricSpecForScaling(context.Background())
diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go
index c5fac74bc95..30ce47cf816 100644
--- a/pkg/scalers/liiklus/LiiklusService.pb.go
+++ b/pkg/scalers/liiklus/LiiklusService.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.23.2
+// protoc-gen-go v1.31.0
+// protoc v4.23.4
// source: LiiklusService.proto
package liiklus
diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go
index 51480c39dae..0e3731ee10d 100644
--- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go
+++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.23.2
+// - protoc v4.23.4
// source: LiiklusService.proto
package liiklus
diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go
index 90b1901abec..465c019a630 100644
--- a/pkg/scalers/liiklus_scaler.go
+++ b/pkg/scalers/liiklus_scaler.go
@@ -32,7 +32,7 @@ type liiklusMetadata struct {
topic string
group string
groupVersion uint32
- scalerIndex int
+ triggerIndex int
}
const (
@@ -105,7 +105,7 @@ func (s *liiklusScaler) GetMetricsAndActivity(ctx context.Context, metricName st
func (s *liiklusScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("liiklus-%s", s.metadata.topic))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("liiklus-%s", s.metadata.topic))),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
@@ -202,6 +202,6 @@ func parseLiiklusMetadata(config *ScalerConfig) (*liiklusMetadata, error) {
groupVersion: groupVersion,
lagThreshold: lagThreshold,
activationLagThreshold: activationLagThreshold,
- scalerIndex: config.ScalerIndex,
+ triggerIndex: config.TriggerIndex,
}, nil
}
diff --git a/pkg/scalers/liiklus_scaler_test.go b/pkg/scalers/liiklus_scaler_test.go
index 28772f1165a..38737ce9a81 100644
--- a/pkg/scalers/liiklus_scaler_test.go
+++ b/pkg/scalers/liiklus_scaler_test.go
@@ -24,7 +24,7 @@ type parseLiiklusMetadataTestData struct {
type liiklusMetricIdentifier struct {
metadataTestData *parseLiiklusMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -171,7 +171,7 @@ func TestLiiklusScalerGetMetricsBehavior(t *testing.T) {
func TestLiiklusGetMetricSpecForScaling(t *testing.T) {
for _, testData := range liiklusMetricIdentifiers {
- meta, err := parseLiiklusMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseLiiklusMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/loki_scaler.go b/pkg/scalers/loki_scaler.go
index dc822674d4e..6b3b9f64cc7 100644
--- a/pkg/scalers/loki_scaler.go
+++ b/pkg/scalers/loki_scaler.go
@@ -45,7 +45,7 @@ type lokiMetadata struct {
threshold float64
activationThreshold float64
lokiAuth *authentication.AuthMeta
- scalerIndex int
+ triggerIndex int
tenantName string
ignoreNullValues bool
unsafeSsl bool
@@ -151,7 +151,7 @@ func parseLokiMetadata(config *ScalerConfig) (meta *lokiMetadata, err error) {
meta.unsafeSsl = unsafeSslValue
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
// parse auth configs from ScalerConfig
auth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams)
@@ -175,7 +175,7 @@ func (s *lokiScaler) Close(context.Context) error {
func (s *lokiScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "loki"),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "loki"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/metrics_api_scaler.go b/pkg/scalers/metrics_api_scaler.go
index 7f130a6679d..b8f5e3ab42b 100644
--- a/pkg/scalers/metrics_api_scaler.go
+++ b/pkg/scalers/metrics_api_scaler.go
@@ -57,7 +57,7 @@ type metricsAPIScalerMetadata struct {
enableBearerAuth bool
bearerToken string
- scalerIndex int
+ triggerIndex int
}
const (
@@ -96,7 +96,7 @@ func NewMetricsAPIScaler(config *ScalerConfig) (Scaler, error) {
func parseMetricsAPIMetadata(config *ScalerConfig) (*metricsAPIScalerMetadata, error) {
meta := metricsAPIScalerMetadata{}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
meta.unsafeSsl = false
if val, ok := config.TriggerMetadata["unsafeSsl"]; ok {
@@ -267,7 +267,7 @@ func (s *metricsAPIScaler) Close(context.Context) error {
func (s *metricsAPIScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("metric-api-%s", s.metadata.valueLocation))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("metric-api-%s", s.metadata.valueLocation))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/metrics_api_scaler_test.go b/pkg/scalers/metrics_api_scaler_test.go
index c6dc79d7658..6bbbe7411ef 100644
--- a/pkg/scalers/metrics_api_scaler_test.go
+++ b/pkg/scalers/metrics_api_scaler_test.go
@@ -89,12 +89,12 @@ func TestParseMetricsAPIMetadata(t *testing.T) {
type metricsAPIMetricIdentifier struct {
metadataTestData *metricsAPIMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
var metricsAPIMetricIdentifiers = []metricsAPIMetricIdentifier{
- {metadataTestData: &testMetricsAPIMetadata[1], scalerIndex: 1, name: "s1-metric-api-metric-test"},
+ {metadataTestData: &testMetricsAPIMetadata[1], triggerIndex: 1, name: "s1-metric-api-metric-test"},
}
func TestMetricsAPIGetMetricSpecForScaling(t *testing.T) {
@@ -105,7 +105,7 @@ func TestMetricsAPIGetMetricSpecForScaling(t *testing.T) {
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: map[string]string{},
GlobalHTTPTimeout: 3000 * time.Millisecond,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
},
)
if err != nil {
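
The test hunks all follow one shape: build a ScalerConfig with TriggerIndex, parse the metadata, and expect a metric name carrying the s<index>- prefix (e.g. "s1-metric-api-metric-test" above). A stand-alone sketch of that assertion style; every identifier here is hypothetical except the prefix convention:

package example

import (
	"fmt"
	"testing"
)

// metricName mirrors the s<index>- prefix convention the expectations above
// encode; the function itself is a hypothetical stand-in.
func metricName(triggerIndex int, base string) string {
	return fmt.Sprintf("s%d-%s", triggerIndex, base)
}

func TestMetricNameUsesTriggerIndex(t *testing.T) {
	// triggerIndex 1 should yield the "s1-" prefix, matching
	// "s1-metric-api-metric-test" in the hunk above.
	got := metricName(1, "metric-api-metric-test")
	if got != "s1-metric-api-metric-test" {
		t.Fatalf("unexpected metric name: %s", got)
	}
}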
diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go
index 271e5567c3b..f79e6508815 100644
--- a/pkg/scalers/mongo_scaler.go
+++ b/pkg/scalers/mongo_scaler.go
@@ -64,7 +64,7 @@ type mongoDBMetadata struct {
// The index of the scaler inside the ScaledObject
// +internal
- scalerIndex int
+ triggerIndex int
}
// Default variables and settings
@@ -197,7 +197,7 @@ func parseMongoDBMetadata(config *ScalerConfig) (*mongoDBMetadata, string, error
// nosemgrep: db-connection-string
connStr = fmt.Sprintf("mongodb://%s:%s@%s/%s", url.QueryEscape(meta.username), url.QueryEscape(meta.password), addr, meta.dbName)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, connStr, nil
}
@@ -250,7 +250,7 @@ func (s *mongoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName st
func (s *mongoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", s.metadata.collection))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", s.metadata.collection))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queryValue),
}
diff --git a/pkg/scalers/mongo_scaler_test.go b/pkg/scalers/mongo_scaler_test.go
index c3c833b91fe..26509d673f8 100644
--- a/pkg/scalers/mongo_scaler_test.go
+++ b/pkg/scalers/mongo_scaler_test.go
@@ -28,7 +28,7 @@ type mongoDBConnectionStringTestData struct {
type mongoDBMetricIdentifier struct {
metadataTestData *parseMongoDBMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -84,8 +84,8 @@ var mongoDBConnectionStringTestDatas = []mongoDBConnectionStringTestData{
}
var mongoDBMetricIdentifiers = []mongoDBMetricIdentifier{
- {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 0, name: "s0-mongodb-demo"},
- {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 1, name: "s1-mongodb-demo"},
+ {metadataTestData: &testMONGODBMetadata[2], triggerIndex: 0, name: "s0-mongodb-demo"},
+ {metadataTestData: &testMONGODBMetadata[2], triggerIndex: 1, name: "s1-mongodb-demo"},
}
func TestParseMongoDBMetadata(t *testing.T) {
@@ -112,7 +112,7 @@ func TestParseMongoDBConnectionString(t *testing.T) {
func TestMongoDBGetMetricSpecForScaling(t *testing.T) {
for _, testData := range mongoDBMetricIdentifiers {
- meta, _, err := parseMongoDBMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, _, err := parseMongoDBMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/mssql_scaler.go b/pkg/scalers/mssql_scaler.go
index 353420d326e..53880043437 100644
--- a/pkg/scalers/mssql_scaler.go
+++ b/pkg/scalers/mssql_scaler.go
@@ -64,7 +64,7 @@ type mssqlMetadata struct {
activationTargetValue float64
// The index of the scaler inside the ScaledObject
// +internal
- scalerIndex int
+ triggerIndex int
}
// NewMSSQLScaler creates a new mssql scaler
@@ -167,7 +167,7 @@ func parseMSSQLMetadata(config *ScalerConfig) (*mssqlMetadata, error) {
meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]]
}
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -227,7 +227,7 @@ func getMSSQLConnectionString(meta *mssqlMetadata) string {
func (s *mssqlScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "mssql"),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "mssql"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/mssql_scaler_test.go b/pkg/scalers/mssql_scaler_test.go
index 734be3e234f..67877259b7a 100644
--- a/pkg/scalers/mssql_scaler_test.go
+++ b/pkg/scalers/mssql_scaler_test.go
@@ -20,7 +20,7 @@ type mssqlTestData struct {
type mssqlMetricIdentifier struct {
metadataTestData *mssqlTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -163,7 +163,7 @@ func TestMSSQLGetMetricSpecForScaling(t *testing.T) {
ResolvedEnv: testData.metadataTestData.resolvedEnv,
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: testData.metadataTestData.authParams,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
}
meta, err := parseMSSQLMetadata(&config)
if err != nil {
diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go
index aa5154efd3f..59a854164f4 100644
--- a/pkg/scalers/mysql_scaler.go
+++ b/pkg/scalers/mysql_scaler.go
@@ -140,7 +140,7 @@ func parseMySQLMetadata(config *ScalerConfig) (*mySQLMetadata, error) {
if meta.connectionString != "" {
meta.dbName = parseMySQLDbNameFromConnectionStr(meta.connectionString)
}
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("mysql-%s", meta.dbName)))
+ meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("mysql-%s", meta.dbName)))
return &meta, nil
}
diff --git a/pkg/scalers/mysql_scaler_test.go b/pkg/scalers/mysql_scaler_test.go
index adb7d5a7665..141e5124a1b 100644
--- a/pkg/scalers/mysql_scaler_test.go
+++ b/pkg/scalers/mysql_scaler_test.go
@@ -18,7 +18,7 @@ type parseMySQLMetadataTestData struct {
type mySQLMetricIdentifier struct {
metadataTestData *parseMySQLMetadataTestData
- scalerIndex int
+ triggerIndex int
metricName string
}
@@ -61,8 +61,8 @@ var testMySQLMetadata = []parseMySQLMetadataTestData{
}
var mySQLMetricIdentifiers = []mySQLMetricIdentifier{
- {metadataTestData: &testMySQLMetadata[1], scalerIndex: 0, metricName: "s0-mysql-stats_db"},
- {metadataTestData: &testMySQLMetadata[2], scalerIndex: 1, metricName: "s1-mysql-test_dbname"},
+ {metadataTestData: &testMySQLMetadata[1], triggerIndex: 0, metricName: "s0-mysql-stats_db"},
+ {metadataTestData: &testMySQLMetadata[2], triggerIndex: 1, metricName: "s1-mysql-test_dbname"},
}
func TestParseMySQLMetadata(t *testing.T) {
@@ -100,7 +100,7 @@ func TestMetadataToConnectionStrBuildNew(t *testing.T) {
func TestMySQLGetMetricSpecForScaling(t *testing.T) {
for _, testData := range mySQLMetricIdentifiers {
- meta, err := parseMySQLMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
+ meta, err := parseMySQLMetadata(&ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/nats_jetstream_scaler.go b/pkg/scalers/nats_jetstream_scaler.go
index 6df5f16169c..5129730a636 100644
--- a/pkg/scalers/nats_jetstream_scaler.go
+++ b/pkg/scalers/nats_jetstream_scaler.go
@@ -44,7 +44,7 @@ type natsJetStreamMetadata struct {
lagThreshold int64
activationLagThreshold int64
clusterSize int
- scalerIndex int
+ triggerIndex int
}
type jetStreamEndpointResponse struct {
@@ -171,7 +171,7 @@ func parseNATSJetStreamMetadata(config *ScalerConfig) (natsJetStreamMetadata, er
meta.activationLagThreshold = activationTargetQueryValue
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
natsServerEndpoint, err := GetFromAuthOrMeta(config, "natsServerMonitoringEndpoint")
if err != nil {
@@ -456,7 +456,7 @@ func (s *natsJetStreamScaler) GetMetricSpecForScaling(context.Context) []v2.Metr
metricName := kedautil.NormalizeString(fmt.Sprintf("nats-jetstream-%s", s.metadata.stream))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
diff --git a/pkg/scalers/nats_jetstream_scaler_test.go b/pkg/scalers/nats_jetstream_scaler_test.go
index 5a4e18db84b..1997f39e8f7 100644
--- a/pkg/scalers/nats_jetstream_scaler_test.go
+++ b/pkg/scalers/nats_jetstream_scaler_test.go
@@ -30,7 +30,7 @@ type parseNATSJetStreamMockResponsesTestData struct {
type natsJetStreamMetricIdentifier struct {
metadataTestData *parseNATSJetStreamMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -88,7 +88,7 @@ func TestNATSJetStreamParseMetadata(t *testing.T) {
func TestNATSJetStreamGetMetricSpecForScaling(t *testing.T) {
for _, testData := range natsJetStreamMetricIdentifiers {
ctx := context.Background()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -238,7 +238,7 @@ func TestNATSJetStreamIsActive(t *testing.T) {
defer srv.Close()
ctx := context.Background()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, TriggerIndex: mockResponse.metadata.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -247,7 +247,7 @@ func TestNATSJetStreamIsActive(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, TriggerIndex: mockResponse.metadata.triggerIndex}, "nats_jetstream_scaler"),
}
_, isActive, err := mockJetStreamScaler.GetMetricsAndActivity(ctx, "metric_name")
@@ -266,7 +266,7 @@ func TestNATSJetStreamIsActive(t *testing.T) {
func TestNewNATSJetStreamScaler(t *testing.T) {
// All Good
- _, err := NewNATSJetStreamScaler(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ _, err := NewNATSJetStreamScaler(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Error("Expected success for New NATS JetStream Scaler but got error", err)
}
@@ -293,7 +293,7 @@ func TestNATSJetStreamGetMetrics(t *testing.T) {
}()
ctx := context.Background()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, TriggerIndex: mockResponse.metadata.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -302,7 +302,7 @@ func TestNATSJetStreamGetMetrics(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, TriggerIndex: mockResponse.metadata.triggerIndex}, "nats_jetstream_scaler"),
}
_, _, err = mockJetStreamScaler.GetMetricsAndActivity(ctx, "metric_name")
@@ -380,7 +380,7 @@ func TestNATSJetStreamgetNATSJetstreamMonitoringData(t *testing.T) {
}()
ctx := context.Background()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -389,7 +389,7 @@ func TestNATSJetStreamgetNATSJetstreamMonitoringData(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}, "nats_jetstream_scaler"),
}
err = mockJetStreamScaler.getNATSJetstreamMonitoringData(ctx, mockJetStreamScaler.metadata.monitoringURL)
@@ -402,7 +402,7 @@ func TestNATSJetStreamGetNATSJetstreamNodeURL(t *testing.T) {
invalidJSONServer := natsMockHTTPJetStreamServer(t, []byte(`{invalidJSON}`))
defer invalidJSONServer.Close()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -411,7 +411,7 @@ func TestNATSJetStreamGetNATSJetstreamNodeURL(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}, "nats_jetstream_scaler"),
}
mockJetStreamScaler.metadata.monitoringURL = "234234:::::34234234;;;;really_bad_URL;;/"
@@ -426,7 +426,7 @@ func TestNATSJetStreamGetNATSJetstreamServerURL(t *testing.T) {
invalidJSONServer := natsMockHTTPJetStreamServer(t, []byte(`{invalidJSON}`))
defer invalidJSONServer.Close()
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -435,7 +435,7 @@ func TestNATSJetStreamGetNATSJetstreamServerURL(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}, "nats_jetstream_scaler"),
}
mockJetStreamScaler.metadata.monitoringURL = "234234:::::34234234;;;;really_bad_URL;;/"
@@ -447,7 +447,7 @@ func TestNATSJetStreamGetNATSJetstreamServerURL(t *testing.T) {
}
func TestInvalidateNATSJetStreamCachedMonitoringData(t *testing.T) {
- meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ meta, err := parseNATSJetStreamMetadata(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -456,14 +456,14 @@ func TestInvalidateNATSJetStreamCachedMonitoringData(t *testing.T) {
stream: nil,
metadata: meta,
httpClient: http.DefaultClient,
- logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0}, "nats_jetstream_scaler"),
+ logger: InitializeLogger(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}, "nats_jetstream_scaler"),
}
mockJetStreamScaler.invalidateNATSJetStreamCachedMonitoringData()
}
func TestNATSJetStreamClose(t *testing.T) {
- mockJetStreamScaler, err := NewNATSJetStreamScaler(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, ScalerIndex: 0})
+ mockJetStreamScaler, err := NewNATSJetStreamScaler(&ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0})
if err != nil {
t.Error("Expected success for New NATS JetStream Scaler but got error", err)
}
diff --git a/pkg/scalers/newrelic_scaler.go b/pkg/scalers/newrelic_scaler.go
index 8aef0a71de8..7c820fd0f77 100644
--- a/pkg/scalers/newrelic_scaler.go
+++ b/pkg/scalers/newrelic_scaler.go
@@ -40,7 +40,7 @@ type newrelicMetadata struct {
nrql string
threshold float64
activationThreshold float64
- scalerIndex int
+ triggerIndex int
}
func NewNewRelicScaler(config *ScalerConfig) (Scaler, error) {
@@ -143,7 +143,7 @@ func parseNewRelicMetadata(config *ScalerConfig, logger logr.Logger) (*newrelicM
} else {
meta.noDataError = false
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -187,7 +187,7 @@ func (s *newrelicScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpe
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/newrelic_scaler_test.go b/pkg/scalers/newrelic_scaler_test.go
index 84e24a745d3..346c0101b4a 100644
--- a/pkg/scalers/newrelic_scaler_test.go
+++ b/pkg/scalers/newrelic_scaler_test.go
@@ -16,7 +16,7 @@ type parseNewRelicMetadataTestData struct {
type newrelicMetricIdentifier struct {
metadataTestData *parseNewRelicMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -73,7 +73,7 @@ func TestNewRelicParseMetadata(t *testing.T) {
}
func TestNewRelicGetMetricSpecForScaling(t *testing.T) {
for _, testData := range newrelicMetricIdentifiers {
- meta, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/openstack_metrics_scaler.go b/pkg/scalers/openstack_metrics_scaler.go
index 4c644117508..9dbe92e5fbc 100644
--- a/pkg/scalers/openstack_metrics_scaler.go
+++ b/pkg/scalers/openstack_metrics_scaler.go
@@ -34,7 +34,7 @@ type openstackMetricMetadata struct {
threshold float64
activationThreshold float64
timeout int
- scalerIndex int
+ triggerIndex int
}
type openstackMetricAuthenticationMetadata struct {
@@ -183,7 +183,7 @@ func parseOpenstackMetricMetadata(config *ScalerConfig, logger logr.Logger) (*op
} else {
meta.timeout = metricDefaultHTTPClientTimeout
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -219,7 +219,7 @@ func (s *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2.Me
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/openstack_metrics_scaler_test.go b/pkg/scalers/openstack_metrics_scaler_test.go
index 47f6525795e..ddfd96feed0 100644
--- a/pkg/scalers/openstack_metrics_scaler_test.go
+++ b/pkg/scalers/openstack_metrics_scaler_test.go
@@ -22,7 +22,7 @@ type openstackMetricScalerMetricIdentifier struct {
resolvedEnv map[string]string
metadataTestData *parseOpenstackMetricMetadataTestData
authMetadataTestData *parseOpenstackMetricAuthMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -107,13 +107,13 @@ func TestOpenstackMetricsGetMetricsForSpecScaling(t *testing.T) {
for _, testData := range testCases {
testData := testData
- meta, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ meta, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.triggerIndex}, logr.Discard())
if err != nil {
t.Fatal("Could not parse metadata from openstack metrics scaler")
}
- _, err = parseOpenstackMetricAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex})
+ _, err = parseOpenstackMetricAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("could not parse openstack metric authentication metadata")
@@ -145,7 +145,7 @@ func TestOpenstackMetricsGetMetricsForSpecScalingInvalidMetaData(t *testing.T) {
for _, testData := range testCases {
testData := testData
t.Run(testData.name, func(pt *testing.T) {
- _, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}, logr.Discard())
+ _, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.triggerIndex}, logr.Discard())
assert.NotNil(t, err)
})
}
@@ -164,7 +164,7 @@ func TestOpenstackMetricAuthenticationInvalidAuthMetadata(t *testing.T) {
for _, testData := range testCases {
testData := testData
t.Run(testData.name, func(ptr *testing.T) {
- _, err := parseOpenstackMetricAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex})
+ _, err := parseOpenstackMetricAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.triggerIndex})
assert.NotNil(t, err)
})
}
diff --git a/pkg/scalers/openstack_swift_scaler.go b/pkg/scalers/openstack_swift_scaler.go
index c8b320aec4e..1e33d9d2d0f 100644
--- a/pkg/scalers/openstack_swift_scaler.go
+++ b/pkg/scalers/openstack_swift_scaler.go
@@ -35,7 +35,7 @@ type openstackSwiftMetadata struct {
objectLimit string
httpClientTimeout int
onlyFiles bool
- scalerIndex int
+ triggerIndex int
}
type openstackSwiftAuthenticationMetadata struct {
@@ -196,7 +196,7 @@ func parseOpenstackSwiftMetadata(config *ScalerConfig) (*openstackSwiftMetadata,
} else {
meta.objectLimit = defaultObjectLimit
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -279,7 +279,7 @@ func (s *openstackSwiftScaler) GetMetricSpecForScaling(context.Context) []v2.Met
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.objectCount),
}
diff --git a/pkg/scalers/openstack_swift_scaler_test.go b/pkg/scalers/openstack_swift_scaler_test.go
index 40ef890e42b..9e230b3fec6 100644
--- a/pkg/scalers/openstack_swift_scaler_test.go
+++ b/pkg/scalers/openstack_swift_scaler_test.go
@@ -103,11 +103,11 @@ func TestOpenstackSwiftGetMetricSpecForScaling(t *testing.T) {
for _, testData := range testCases {
testData := testData
- meta, err := parseOpenstackSwiftMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scaledIndex})
+ meta, err := parseOpenstackSwiftMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.scaledIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
- _, err = parseOpenstackSwiftAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scaledIndex})
+ _, err = parseOpenstackSwiftAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.scaledIndex})
if err != nil {
t.Fatal("Could not parse auth metadata:", err)
}
@@ -135,7 +135,7 @@ func TestParseOpenstackSwiftMetadataForInvalidCases(t *testing.T) {
for _, testData := range testCases {
testData := testData
t.Run(testData.name, func(pt *testing.T) {
- _, err := parseOpenstackSwiftMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scaledIndex})
+ _, err := parseOpenstackSwiftMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.scaledIndex})
assert.NotNil(t, err)
})
}
@@ -155,7 +155,7 @@ func TestParseOpenstackSwiftAuthenticationMetadataForInvalidCases(t *testing.T)
for _, testData := range testCases {
testData := testData
t.Run(testData.name, func(pt *testing.T) {
- _, err := parseOpenstackSwiftAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scaledIndex})
+ _, err := parseOpenstackSwiftAuthenticationMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, TriggerIndex: testData.scaledIndex})
assert.NotNil(t, err)
})
}
diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go
index b0f2551fbef..6f93aa77081 100644
--- a/pkg/scalers/postgresql_scaler.go
+++ b/pkg/scalers/postgresql_scaler.go
@@ -28,7 +28,7 @@ type postgreSQLMetadata struct {
activationTargetQueryValue float64
connection string
query string
- scalerIndex int
+ triggerIndex int
}
// NewPostgreSQLScaler creates a new postgreSQL scaler
@@ -137,7 +137,7 @@ func parsePostgreSQLMetadata(config *ScalerConfig) (*postgreSQLMetadata, error)
params = append(params, "password="+escapePostgreConnectionParameter(password))
meta.connection = strings.Join(params, " ")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -179,7 +179,7 @@ func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error)
func (s *postgreSQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString("postgresql")),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString("postgresql")),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetQueryValue),
}
diff --git a/pkg/scalers/postgresql_scaler_test.go b/pkg/scalers/postgresql_scaler_test.go
index f719c1b0d6a..c3ad071368a 100644
--- a/pkg/scalers/postgresql_scaler_test.go
+++ b/pkg/scalers/postgresql_scaler_test.go
@@ -43,7 +43,7 @@ var postgreSQLMetricIdentifiers = []postgreSQLMetricIdentifier{
func TestPosgresSQLGetMetricSpecForScaling(t *testing.T) {
for _, testData := range postgreSQLMetricIdentifiers {
- meta, err := parsePostgreSQLMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authParam, ScalerIndex: testData.scaleIndex})
+ meta, err := parsePostgreSQLMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authParam, TriggerIndex: testData.scaleIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
@@ -77,7 +77,7 @@ var testPostgreSQLConnectionstring = []postgreSQLConnectionStringTestData{
func TestPosgresSQLConnectionStringGeneration(t *testing.T) {
for _, testData := range testPostgreSQLConnectionstring {
- meta, err := parsePostgreSQLMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParam, ScalerIndex: 0})
+ meta, err := parsePostgreSQLMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParam, TriggerIndex: 0})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go
index 336a4eed342..fdff81413cb 100644
--- a/pkg/scalers/predictkube_scaler.go
+++ b/pkg/scalers/predictkube_scaler.go
@@ -91,7 +91,7 @@ type predictKubeMetadata struct {
query string
threshold float64
activationThreshold float64
- scalerIndex int
+ triggerIndex int
}
func (s *PredictKubeScaler) setupClientConn() error {
@@ -183,7 +183,7 @@ func (s *PredictKubeScaler) GetMetricSpecForScaling(context.Context) []v2.Metric
metricName := kedautil.NormalizeString(fmt.Sprintf("predictkube-%s", predictKubeMetricPrefix))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
@@ -271,7 +271,7 @@ func (s *PredictKubeScaler) doQuery(ctx context.Context) ([]*commonproto.Item, e
// parsePrometheusResult parses the response from the prometheus server.
func (s *PredictKubeScaler) parsePrometheusResult(result model.Value) (out []*commonproto.Item, err error) {
- metricName := GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("predictkube-%s", predictKubeMetricPrefix)))
+ metricName := GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("predictkube-%s", predictKubeMetricPrefix)))
switch result.Type() {
case model.ValVector:
if res, ok := result.(model.Vector); ok {
@@ -421,7 +421,7 @@ func parsePredictKubeMetadata(config *ScalerConfig) (result *predictKubeMetadata
meta.activationThreshold = activationThreshold
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
if val, ok := config.AuthParams["apiKey"]; ok {
err = validate.Var(val, "jwt")
diff --git a/pkg/scalers/predictkube_scaler_test.go b/pkg/scalers/predictkube_scaler_test.go
index af727de1ffc..bf7c625d832 100644
--- a/pkg/scalers/predictkube_scaler_test.go
+++ b/pkg/scalers/predictkube_scaler_test.go
@@ -154,7 +154,7 @@ func TestPredictKubeParseMetadata(t *testing.T) {
type predictKubeMetricIdentifier struct {
metadataTestData *predictKubeMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -178,7 +178,7 @@ func TestPredictKubeGetMetricSpecForScaling(t *testing.T) {
context.Background(), &ScalerConfig{
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: testData.metadataTestData.authParams,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
},
)
assert.NoError(t, err)
@@ -212,7 +212,7 @@ func TestPredictKubeGetMetrics(t *testing.T) {
context.Background(), &ScalerConfig{
TriggerMetadata: testData.metadataTestData.metadata,
AuthParams: testData.metadataTestData.authParams,
- ScalerIndex: testData.scalerIndex,
+ TriggerIndex: testData.triggerIndex,
},
)
assert.NoError(t, err)
diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go
index d386926862b..f204167b3ff 100644
--- a/pkg/scalers/prometheus_scaler.go
+++ b/pkg/scalers/prometheus_scaler.go
@@ -54,7 +54,7 @@ type prometheusMetadata struct {
activationThreshold float64
prometheusAuth *authentication.AuthMeta
namespace string
- scalerIndex int
+ triggerIndex int
customHeaders map[string]string
// sometimes we should consider that there is an error we can accept
// default value is true/t, to ignore the null value returned from prometheus
@@ -224,7 +224,7 @@ func parsePrometheusMetadata(config *ScalerConfig) (meta *prometheusMetadata, er
meta.unsafeSsl = unsafeSslValue
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
err = parseAuthConfig(config, meta)
if err != nil {
@@ -260,7 +260,7 @@ func (s *prometheusScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS
metricName := kedautil.NormalizeString("prometheus")
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/prometheus_scaler_test.go b/pkg/scalers/prometheus_scaler_test.go
index 4f4238c99bf..57e8d448904 100644
--- a/pkg/scalers/prometheus_scaler_test.go
+++ b/pkg/scalers/prometheus_scaler_test.go
@@ -27,7 +27,7 @@ type parsePrometheusMetadataTestData struct {
type prometheusMetricIdentifier struct {
metadataTestData *parsePrometheusMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -132,7 +132,7 @@ func TestPrometheusParseMetadata(t *testing.T) {
func TestPrometheusGetMetricSpecForScaling(t *testing.T) {
for _, testData := range prometheusMetricIdentifiers {
- meta, err := parsePrometheusMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parsePrometheusMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/pulsar_scaler.go b/pkg/scalers/pulsar_scaler.go
index 306e5a29f47..75cf1b96d4d 100644
--- a/pkg/scalers/pulsar_scaler.go
+++ b/pkg/scalers/pulsar_scaler.go
@@ -36,9 +36,9 @@ type pulsarMetadata struct {
pulsarAuth *authentication.AuthMeta
- statsURL string
- metricName string
- scalerIndex int
+ statsURL string
+ metricName string
+ triggerIndex int
}
const (
@@ -238,7 +238,7 @@ func parsePulsarMetadata(config *ScalerConfig, logger logr.Logger) (pulsarMetada
}
}
meta.pulsarAuth = auth
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return meta, nil
}
@@ -327,7 +327,7 @@ func (s *pulsarScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(s.metadata.metricName)),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(s.metadata.metricName)),
},
Target: v2.MetricTarget{
Type: v2.AverageValueMetricType,
diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go
index 9484dee4de0..37854d2dce6 100644
--- a/pkg/scalers/rabbitmq_scaler.go
+++ b/pkg/scalers/rabbitmq_scaler.go
@@ -79,7 +79,7 @@ type rabbitMQMetadata struct {
pageSize int64 // specify the page size if useRegex is enabled
operation string // specify the operation to apply in case of multiples queues
timeout time.Duration // custom http timeout for a specific trigger
- scalerIndex int // scaler index
+ triggerIndex int // trigger index
// TLS
ca string
@@ -315,7 +315,7 @@ func parseRabbitMQMetadata(config *ScalerConfig) (*rabbitMQMetadata, error) {
if err := resolveTimeout(config, &meta); err != nil {
return nil, err
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -598,7 +598,7 @@ func (s *rabbitMQScaler) getQueueInfoViaHTTP(ctx context.Context) (*queueInfo, e
func (s *rabbitMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(s.metadata.queueName)))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(s.metadata.queueName)))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go
index 11a10e2e8c7..1ba918dd47c 100644
--- a/pkg/scalers/rabbitmq_scaler_test.go
+++ b/pkg/scalers/rabbitmq_scaler_test.go
@@ -629,7 +629,7 @@ func TestGetPageSizeWithRegex(t *testing.T) {
func TestRabbitMQGetMetricSpecForScaling(t *testing.T) {
for _, testData := range rabbitMQMetricIdentifiers {
- meta, err := parseRabbitMQMetadata(&ScalerConfig{ResolvedEnv: sampleRabbitMqResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, ScalerIndex: testData.index})
+ meta, err := parseRabbitMQMetadata(&ScalerConfig{ResolvedEnv: sampleRabbitMqResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, TriggerIndex: testData.index})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go
index 81aa3797004..2420d21bccb 100644
--- a/pkg/scalers/redis_scaler.go
+++ b/pkg/scalers/redis_scaler.go
@@ -67,7 +67,7 @@ type redisMetadata struct {
listName string
databaseIndex int
connectionInfo redisConnectionInfo
- scalerIndex int
+ triggerIndex int
}
// NewRedisScaler creates a new redisScaler
@@ -294,7 +294,7 @@ func parseRedisMetadata(config *ScalerConfig, parserFn redisAddressParser) (*red
}
meta.databaseIndex = int(dbIndex)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -307,7 +307,7 @@ func (s *redisScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
metricName := util.NormalizeString(fmt.Sprintf("redis-%s", s.metadata.listName))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.listLength),
}
diff --git a/pkg/scalers/redis_scaler_test.go b/pkg/scalers/redis_scaler_test.go
index e89d245c86b..20903e8506e 100644
--- a/pkg/scalers/redis_scaler_test.go
+++ b/pkg/scalers/redis_scaler_test.go
@@ -28,7 +28,7 @@ type parseRedisMetadataTestData struct {
type redisMetricIdentifier struct {
metadataTestData *parseRedisMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -113,7 +113,7 @@ func TestRedisParseMetadata(t *testing.T) {
func TestRedisGetMetricSpecForScaling(t *testing.T) {
for _, testData := range redisMetricIdentifiers {
- meta, err := parseRedisMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testRedisResolvedEnv, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, parseRedisAddress)
+ meta, err := parseRedisMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testRedisResolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, parseRedisAddress)
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/redis_streams_scaler.go b/pkg/scalers/redis_streams_scaler.go
index 644936950af..b4a03e00bde 100644
--- a/pkg/scalers/redis_streams_scaler.go
+++ b/pkg/scalers/redis_streams_scaler.go
@@ -60,7 +60,7 @@ type redisStreamsMetadata struct {
consumerGroupName string
databaseIndex int
connectionInfo redisConnectionInfo
- scalerIndex int
+ triggerIndex int
activationLagCount int64
}
@@ -330,7 +330,7 @@ func parseRedisStreamsMetadata(config *ScalerConfig, parseFn redisAddressParser)
meta.databaseIndex = int(dbIndex)
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -353,7 +353,7 @@ func (s *redisStreamsScaler) GetMetricSpecForScaling(context.Context) []v2.Metri
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("redis-streams-%s", s.metadata.streamName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("redis-streams-%s", s.metadata.streamName))),
},
Target: GetMetricTarget(s.metricType, metricValue),
}
diff --git a/pkg/scalers/redis_streams_scaler_test.go b/pkg/scalers/redis_streams_scaler_test.go
index 36e0a02b842..9345b30364c 100644
--- a/pkg/scalers/redis_streams_scaler_test.go
+++ b/pkg/scalers/redis_streams_scaler_test.go
@@ -172,7 +172,7 @@ type redisStreamsTestMetadata struct {
func TestRedisStreamsGetMetricSpecForScaling(t *testing.T) {
type redisStreamsMetricIdentifier struct {
metadataTestData *redisStreamsTestMetadata
- scalerIndex int
+ triggerIndex int
name string
}
@@ -189,7 +189,7 @@ func TestRedisStreamsGetMetricSpecForScaling(t *testing.T) {
}
for _, testData := range redisStreamMetricIdentifiers {
- meta, err := parseRedisStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: map[string]string{"REDIS_SERVICE": "my-address"}, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, parseRedisAddress)
+ meta, err := parseRedisStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: map[string]string{"REDIS_SERVICE": "my-address"}, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}, parseRedisAddress)
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go
index 50dcb220cfb..cce7eccc8d8 100644
--- a/pkg/scalers/scaler.go
+++ b/pkg/scalers/scaler.go
@@ -96,8 +96,11 @@ type ScalerConfig struct {
// PodIdentity
PodIdentity kedav1alpha1.AuthPodIdentity
- // ScalerIndex
- ScalerIndex int
+ // TriggerIndex
+ TriggerIndex int
+
+ // TriggerUniqueKey for the scaler across KEDA. Useful to uniquely identify the scaler, e.g. for the AWS credentials cache
+ TriggerUniqueKey string
// MetricType
MetricType v2.MetricTargetType
@@ -131,19 +134,19 @@ func GetFromAuthOrMeta(config *ScalerConfig, field string) (string, error) {
}
// GenerateMetricNameWithIndex helps to add the index prefix to the metric name
-func GenerateMetricNameWithIndex(scalerIndex int, metricName string) string {
- return fmt.Sprintf("s%d-%s", scalerIndex, metricName)
+func GenerateMetricNameWithIndex(triggerIndex int, metricName string) string {
+ return fmt.Sprintf("s%d-%s", triggerIndex, metricName)
}
// RemoveIndexFromMetricName removes the index prefix from the metric name
-func RemoveIndexFromMetricName(scalerIndex int, metricName string) (string, error) {
+func RemoveIndexFromMetricName(triggerIndex int, metricName string) (string, error) {
metricNameSplit := strings.SplitN(metricName, "-", 2)
if len(metricNameSplit) != 2 {
return "", fmt.Errorf("metric name without index prefix")
}
indexPrefix, metricNameWithoutIndex := metricNameSplit[0], metricNameSplit[1]
- if indexPrefix != fmt.Sprintf("s%d", scalerIndex) {
+ if indexPrefix != fmt.Sprintf("s%d", triggerIndex) {
return "", fmt.Errorf("metric name contains incorrect index prefix")
}
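For illustration, here is a minimal, self-contained sketch of how the renamed helpers round-trip the "sN-" prefix. The two functions below re-implement GenerateMetricNameWithIndex and RemoveIndexFromMetricName exactly as shown in the hunk above, so the snippet runs outside the KEDA tree:

package main

import (
	"fmt"
	"strings"
)

// generateMetricNameWithIndex mirrors GenerateMetricNameWithIndex above:
// it prepends the trigger index as an "sN-" prefix.
func generateMetricNameWithIndex(triggerIndex int, metricName string) string {
	return fmt.Sprintf("s%d-%s", triggerIndex, metricName)
}

// removeIndexFromMetricName mirrors RemoveIndexFromMetricName above:
// it validates and strips the "sN-" prefix.
func removeIndexFromMetricName(triggerIndex int, metricName string) (string, error) {
	parts := strings.SplitN(metricName, "-", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("metric name without index prefix")
	}
	if parts[0] != fmt.Sprintf("s%d", triggerIndex) {
		return "", fmt.Errorf("metric name contains incorrect index prefix")
	}
	return parts[1], nil
}

func main() {
	name := generateMetricNameWithIndex(2, "prometheus")
	fmt.Println(name) // s2-prometheus
	base, err := removeIndexFromMetricName(2, name)
	fmt.Println(base, err) // prometheus <nil>
}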
diff --git a/pkg/scalers/scaler_test.go b/pkg/scalers/scaler_test.go
index 141c89985c4..4cd2f7ef00b 100644
--- a/pkg/scalers/scaler_test.go
+++ b/pkg/scalers/scaler_test.go
@@ -88,25 +88,25 @@ func TestGetMetricTarget(t *testing.T) {
func TestRemoveIndexFromMetricName(t *testing.T) {
cases := []struct {
- scalerIndex int
+ triggerIndex int
metricName string
expectedMetricNameWithoutIndexPrefix string
isError bool
}{
// Proper input
- {scalerIndex: 0, metricName: "s0-metricName", expectedMetricNameWithoutIndexPrefix: "metricName", isError: false},
- // Proper input with scalerIndex > 9
- {scalerIndex: 123, metricName: "s123-metricName", expectedMetricNameWithoutIndexPrefix: "metricName", isError: false},
+ {triggerIndex: 0, metricName: "s0-metricName", expectedMetricNameWithoutIndexPrefix: "metricName", isError: false},
+ // Proper input with triggerIndex > 9
+ {triggerIndex: 123, metricName: "s123-metricName", expectedMetricNameWithoutIndexPrefix: "metricName", isError: false},
// Incorrect index prefix
- {scalerIndex: 1, metricName: "s0-metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
+ {triggerIndex: 1, metricName: "s0-metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
// Incorrect index prefix
- {scalerIndex: 0, metricName: "0-metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
+ {triggerIndex: 0, metricName: "0-metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
// No index prefix
- {scalerIndex: 0, metricName: "metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
+ {triggerIndex: 0, metricName: "metricName", expectedMetricNameWithoutIndexPrefix: "", isError: true},
}
for _, testCase := range cases {
- metricName, err := RemoveIndexFromMetricName(testCase.scalerIndex, testCase.metricName)
+ metricName, err := RemoveIndexFromMetricName(testCase.triggerIndex, testCase.metricName)
if err != nil && !testCase.isError {
t.Error("Expected success but got error", err)
}
diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go
index 570961006f6..508d857c424 100644
--- a/pkg/scalers/selenium_grid_scaler.go
+++ b/pkg/scalers/selenium_grid_scaler.go
@@ -34,7 +34,7 @@ type seleniumGridScalerMetadata struct {
activationThreshold int64
browserVersion string
unsafeSsl bool
- scalerIndex int
+ triggerIndex int
platformName string
}
@@ -152,7 +152,7 @@ func parseSeleniumGridScalerMetadata(config *ScalerConfig) (*seleniumGridScalerM
meta.platformName = DefaultPlatformName
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -179,7 +179,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri
metricName := kedautil.NormalizeString(fmt.Sprintf("seleniumgrid-%s", s.metadata.browserName))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/solace_scaler.go b/pkg/scalers/solace_scaler.go
index 8838a1f0622..30c2c4744fb 100644
--- a/pkg/scalers/solace_scaler.go
+++ b/pkg/scalers/solace_scaler.go
@@ -103,7 +103,7 @@ type SolaceMetadata struct {
activationMsgSpoolUsageTarget int // Spool Use Target in Megabytes
activationMsgRxRateTarget int // Ingress Rate Target per consumer in msgs/second
// Trigger index
- scalerIndex int
+ triggerIndex int
}
// SEMP API Response Root Struct
@@ -262,7 +262,7 @@ func parseSolaceMetadata(config *ScalerConfig) (*SolaceMetadata, error) {
return nil, e
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -320,7 +320,7 @@ func (s *SolaceScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec
metricName := kedautil.NormalizeString(fmt.Sprintf("solace-%s-%s", s.metadata.queueName, solaceTriggermsgcount))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.msgCountTarget),
}
@@ -332,7 +332,7 @@ func (s *SolaceScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec
metricName := kedautil.NormalizeString(fmt.Sprintf("solace-%s-%s", s.metadata.queueName, solaceTriggermsgspoolusage))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.msgSpoolUsageTarget),
}
@@ -344,7 +344,7 @@ func (s *SolaceScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec
metricName := kedautil.NormalizeString(fmt.Sprintf("solace-%s-%s", s.metadata.queueName, solaceTriggermsgrxrate))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.msgRxRateTarget),
}
diff --git a/pkg/scalers/solace_scaler_test.go b/pkg/scalers/solace_scaler_test.go
index 28acc36337b..c9dfbbf7013 100644
--- a/pkg/scalers/solace_scaler_test.go
+++ b/pkg/scalers/solace_scaler_test.go
@@ -12,10 +12,10 @@ import (
)
type testSolaceMetadata struct {
- testID string
- metadata map[string]string
- scalerIndex int
- isError bool
+ testID string
+ metadata map[string]string
+ triggerIndex int
+ isError bool
}
var (
@@ -532,7 +532,7 @@ var testSolaceExpectedMetricNames = map[string]string{
func TestSolaceParseSolaceMetadata(t *testing.T) {
for _, testData := range testParseSolaceMetadata {
fmt.Print(testData.testID)
- meta, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
+ meta, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: nil, TriggerIndex: testData.triggerIndex})
switch {
case err != nil && !testData.isError:
t.Error("expected success but got error: ", err)
@@ -550,7 +550,7 @@ func TestSolaceParseSolaceMetadata(t *testing.T) {
}
for _, testData := range testSolaceEnvCreds {
fmt.Print(testData.testID)
- _, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: testDataSolaceResolvedEnvVALID, TriggerMetadata: testData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
+ _, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: testDataSolaceResolvedEnvVALID, TriggerMetadata: testData.metadata, AuthParams: nil, TriggerIndex: testData.triggerIndex})
switch {
case err != nil && !testData.isError:
t.Error("expected success but got error: ", err)
@@ -564,7 +564,7 @@ func TestSolaceParseSolaceMetadata(t *testing.T) {
}
for _, testData := range testSolaceK8sSecretCreds {
fmt.Print(testData.testID)
- _, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: testDataSolaceAuthParamsVALID, ScalerIndex: testData.scalerIndex})
+ _, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: testDataSolaceAuthParamsVALID, TriggerIndex: testData.triggerIndex})
switch {
case err != nil && !testData.isError:
t.Error("expected success but got error: ", err)
@@ -584,7 +584,7 @@ func TestSolaceGetMetricSpec(t *testing.T) {
fmt.Print(testData.testID)
var err error
var solaceMeta *SolaceMetadata
- solaceMeta, err = parseSolaceMetadata(&ScalerConfig{ResolvedEnv: testDataSolaceResolvedEnvVALID, TriggerMetadata: testData.metadata, AuthParams: testDataSolaceAuthParamsVALID, ScalerIndex: testData.scalerIndex})
+ solaceMeta, err = parseSolaceMetadata(&ScalerConfig{ResolvedEnv: testDataSolaceResolvedEnvVALID, TriggerMetadata: testData.metadata, AuthParams: testDataSolaceAuthParamsVALID, TriggerIndex: testData.triggerIndex})
if err != nil {
fmt.Printf("\n Failed to parse metadata: %v", err)
} else {
diff --git a/pkg/scalers/solr_scaler.go b/pkg/scalers/solr_scaler.go
index c87dcbee448..83698056c96 100644
--- a/pkg/scalers/solr_scaler.go
+++ b/pkg/scalers/solr_scaler.go
@@ -28,7 +28,7 @@ type solrMetadata struct {
targetQueryValue float64
activationTargetQueryValue float64
query string
- scalerIndex int
+ triggerIndex int
// Authentication
username string
@@ -121,7 +121,7 @@ func parseSolrMetadata(config *ScalerConfig) (*solrMetadata, error) {
return nil, fmt.Errorf("no password given")
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
return &meta, nil
}
@@ -162,7 +162,7 @@ func (s *solrScaler) getItemCount(ctx context.Context) (float64, error) {
func (s *solrScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString("solr")),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString("solr")),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetQueryValue),
}
diff --git a/pkg/scalers/solr_scaler_test.go b/pkg/scalers/solr_scaler_test.go
index a91f1ea4bb9..0ebfb012123 100644
--- a/pkg/scalers/solr_scaler_test.go
+++ b/pkg/scalers/solr_scaler_test.go
@@ -14,7 +14,7 @@ type parseSolrMetadataTestData struct {
type solrMetricIdentifier struct {
metadataTestData *parseSolrMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -59,7 +59,7 @@ func TestSolrParseMetadata(t *testing.T) {
func TestSolrGetMetricSpecForScaling(t *testing.T) {
for _, testData := range solrMetricIdentifiers {
ctx := context.Background()
- meta, err := parseSolrMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex, AuthParams: testData.metadataTestData.authParams})
+ meta, err := parseSolrMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex, AuthParams: testData.metadataTestData.authParams})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go
index 79796a42f57..23945d63e14 100644
--- a/pkg/scalers/stan_scaler.go
+++ b/pkg/scalers/stan_scaler.go
@@ -51,7 +51,7 @@ type stanMetadata struct {
subject string
lagThreshold int64
activationLagThreshold int64
- scalerIndex int
+ triggerIndex int
}
const (
@@ -119,7 +119,7 @@ func parseStanMetadata(config *ScalerConfig) (stanMetadata, error) {
meta.activationLagThreshold = activationTargetQueryValue
}
- meta.scalerIndex = config.ScalerIndex
+ meta.triggerIndex = config.TriggerIndex
var err error
useHTTPS := false
@@ -191,7 +191,7 @@ func (s *stanScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
metricName := kedautil.NormalizeString(fmt.Sprintf("stan-%s", s.metadata.subject))
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
diff --git a/pkg/scalers/stan_scaler_test.go b/pkg/scalers/stan_scaler_test.go
index be665cd3a8e..1bf12001885 100644
--- a/pkg/scalers/stan_scaler_test.go
+++ b/pkg/scalers/stan_scaler_test.go
@@ -17,7 +17,7 @@ type parseStanMetadataTestData struct {
type stanMetricIdentifier struct {
metadataTestData *parseStanMetadataTestData
- scalerIndex int
+ triggerIndex int
name string
}
@@ -61,7 +61,7 @@ func TestStanParseMetadata(t *testing.T) {
func TestStanGetMetricSpecForScaling(t *testing.T) {
for _, testData := range stanMetricIdentifiers {
ctx := context.Background()
- meta, err := parseStanMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex})
+ meta, err := parseStanMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go
index b7c305f5702..d73a1a33dcd 100644
--- a/pkg/scaling/resolver/scale_resolvers.go
+++ b/pkg/scaling/resolver/scale_resolvers.go
@@ -187,6 +187,27 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log
}
switch podIdentity.Provider {
+ case kedav1alpha1.PodIdentityProviderAws:
+ if podIdentity.RoleArn != "" {
+ if podIdentity.IsWorkloadIdentityOwner() {
+ return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
+ fmt.Errorf("roleArn can't be set if KEDA isn't identity owner, current value: '%s'", *podIdentity.IdentityOwner)
+ }
+ authParams["awsRoleArn"] = podIdentity.RoleArn
+ }
+ if podIdentity.IsWorkloadIdentityOwner() {
+ serviceAccountName := defaultServiceAccount
+ if podTemplateSpec.Spec.ServiceAccountName != "" {
+ serviceAccountName = podTemplateSpec.Spec.ServiceAccountName
+ }
+ serviceAccount := &corev1.ServiceAccount{}
+ err := client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: namespace}, serviceAccount)
+ if err != nil {
+ return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
+ fmt.Errorf("error getting service account: '%s', error: %w", serviceAccountName, err)
+ }
+ authParams["awsRoleArn"] = serviceAccount.Annotations[kedav1alpha1.PodIdentityAnnotationEKS]
+ }
case kedav1alpha1.PodIdentityProviderAwsEKS:
serviceAccountName := defaultServiceAccount
if podTemplateSpec.Spec.ServiceAccountName != "" {
@@ -202,10 +223,6 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log
case kedav1alpha1.PodIdentityProviderAwsKiam:
authParams["awsRoleArn"] = podTemplateSpec.ObjectMeta.Annotations[kedav1alpha1.PodIdentityAnnotationKiam]
case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
- if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzure {
- // FIXME: Delete this for v2.15
- logger.Info("WARNING: Azure AD Pod Identity has been archived (https://github.com/Azure/aad-pod-identity#-announcement) and will be removed from KEDA on v2.15")
- }
if podIdentity.IdentityID != nil && *podIdentity.IdentityID == "" {
return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, fmt.Errorf("IdentityID of PodIdentity should not be empty")
}
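For context, the new aws case above resolves the role ARN from two sources: an explicit roleArn on the pod identity (rejected when the workload, not KEDA, owns the identity) or, for workload identity owners, the EKS role annotation on the workload's service account. Below is a minimal sketch of that decision flow using simplified stand-in types instead of the KEDA API objects; the annotation key is assumed to be "eks.amazonaws.com/role-arn", the value behind kedav1alpha1.PodIdentityAnnotationEKS:

package main

import "fmt"

// podIdentity is a simplified stand-in for kedav1alpha1.AuthPodIdentity.
type podIdentity struct {
	roleArn       string
	identityOwner string // "keda" or "workload"
}

func (p podIdentity) isWorkloadIdentityOwner() bool { return p.identityOwner == "workload" }

// resolveAwsRoleArn mirrors the decision flow of the aws branch above.
func resolveAwsRoleArn(p podIdentity, saAnnotations map[string]string) (string, error) {
	if p.roleArn != "" {
		if p.isWorkloadIdentityOwner() {
			return "", fmt.Errorf("roleArn can't be set if KEDA isn't the identity owner, current value: '%s'", p.identityOwner)
		}
		return p.roleArn, nil
	}
	if p.isWorkloadIdentityOwner() {
		// The role comes from the service account annotation, as in the hunk above.
		return saAnnotations["eks.amazonaws.com/role-arn"], nil
	}
	return "", nil // no awsRoleArn set; KEDA falls back to its own credentials
}

func main() {
	arn, err := resolveAwsRoleArn(
		podIdentity{roleArn: "arn:aws:iam::123456789012:role/app", identityOwner: "keda"},
		nil,
	)
	fmt.Println(arn, err) // arn:aws:iam::123456789012:role/app <nil>
}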
diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go
index 5e7dc3ddc7f..dca0f6c4f36 100644
--- a/pkg/scaling/scale_handler.go
+++ b/pkg/scaling/scale_handler.go
@@ -453,13 +453,13 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
// let's check metrics for all scalers in a ScaledObject
scalers, scalerConfigs := cache.GetScalers()
- for scalerIndex := 0; scalerIndex < len(scalers); scalerIndex++ {
- scalerName := strings.Replace(fmt.Sprintf("%T", scalers[scalerIndex]), "*scalers.", "", 1)
- if scalerConfigs[scalerIndex].TriggerName != "" {
- scalerName = scalerConfigs[scalerIndex].TriggerName
+ for triggerIndex := 0; triggerIndex < len(scalers); triggerIndex++ {
+ scalerName := strings.Replace(fmt.Sprintf("%T", scalers[triggerIndex]), "*scalers.", "", 1)
+ if scalerConfigs[triggerIndex].TriggerName != "" {
+ scalerName = scalerConfigs[triggerIndex].TriggerName
}
- metricSpecs, err := cache.GetMetricSpecForScalingForScaler(ctx, scalerIndex)
+ metricSpecs, err := cache.GetMetricSpecForScalingForScaler(ctx, triggerIndex)
if err != nil {
isScalerError = true
logger.Error(err, "error getting metric spec for the scaler", "scaler", scalerName)
@@ -485,7 +485,7 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
// Pair metric values with their trigger names. This is applied only when
// ScalingModifiers.Formula is defined in SO.
- metricTriggerPairList, err = modifiers.AddPairTriggerAndMetric(metricTriggerPairList, scaledObject, metricName, scalerConfigs[scalerIndex].TriggerName)
+ metricTriggerPairList, err = modifiers.AddPairTriggerAndMetric(metricTriggerPairList, scaledObject, metricName, scalerConfigs[triggerIndex].TriggerName)
if err != nil {
logger.Error(err, "error pairing triggers & metrics for compositeScaler")
}
@@ -494,7 +494,7 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
// if cache is defined for this scaler/metric, let's try to hit it first
metricsFoundInCache := false
- if scalerConfigs[scalerIndex].TriggerUseCachedMetrics {
+ if scalerConfigs[triggerIndex].TriggerUseCachedMetrics {
var metricsRecord metricscache.MetricsRecord
if metricsRecord, metricsFoundInCache = h.scaledObjectsMetricCache.ReadRecord(scaledObjectIdentifier, spec.External.Metric.Name); metricsFoundInCache {
logger.V(1).Info("Reading metrics from cache", "scaler", scalerName, "metricName", spec.External.Metric.Name, "metricsRecord", metricsRecord)
@@ -505,9 +505,9 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
if !metricsFoundInCache {
var latency int64
- metrics, _, latency, err = cache.GetMetricsAndActivityForScaler(ctx, scalerIndex, metricName)
+ metrics, _, latency, err = cache.GetMetricsAndActivityForScaler(ctx, triggerIndex, metricName)
if latency != -1 {
- metricscollector.RecordScalerLatency(scaledObjectNamespace, scaledObject.Name, scalerName, scalerIndex, metricName, float64(latency))
+ metricscollector.RecordScalerLatency(scaledObjectNamespace, scaledObject.Name, scalerName, triggerIndex, metricName, float64(latency))
}
logger.V(1).Info("Getting metrics from scaler", "scaler", scalerName, "metricName", spec.External.Metric.Name, "metrics", metrics, "scalerError", err)
}
@@ -523,11 +523,11 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
} else {
for _, metric := range metrics {
metricValue := metric.Value.AsApproximateFloat64()
- metricscollector.RecordScalerMetric(scaledObjectNamespace, scaledObjectName, scalerName, scalerIndex, metric.MetricName, metricValue)
+ metricscollector.RecordScalerMetric(scaledObjectNamespace, scaledObjectName, scalerName, triggerIndex, metric.MetricName, metricValue)
}
matchingMetrics = append(matchingMetrics, metrics...)
}
- metricscollector.RecordScalerError(scaledObjectNamespace, scaledObjectName, scalerName, scalerIndex, metricName, err)
+ metricscollector.RecordScalerError(scaledObjectNamespace, scaledObjectName, scalerName, triggerIndex, metricName, err)
}
}
}
@@ -586,8 +586,8 @@ func (h *scaleHandler) getScaledObjectState(ctx context.Context, scaledObject *k
// Let's collect the status of all scalers, no matter if any scaler raises an error or is active
scalers, scalerConfigs := cache.GetScalers()
- for scalerIndex := 0; scalerIndex < len(scalers); scalerIndex++ {
- result := h.getScalerState(ctx, scalers[scalerIndex], scalerIndex, scalerConfigs[scalerIndex], cache, logger, scaledObject)
+ for triggerIndex := 0; triggerIndex < len(scalers); triggerIndex++ {
+ result := h.getScalerState(ctx, scalers[triggerIndex], triggerIndex, scalerConfigs[triggerIndex], cache, logger, scaledObject)
if !isScaledObjectActive {
isScaledObjectActive = result.IsActive
}
@@ -666,7 +666,7 @@ type scalerState struct {
// for a specific scaler. The state contains whether it's active or
// has errors, but also the records for the cache and the metrics
// for the custom formulas
-func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, scalerIndex int, scalerConfig scalers.ScalerConfig,
+func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, triggerIndex int, scalerConfig scalers.ScalerConfig,
cache *cache.ScalersCache, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) scalerState {
result := scalerState{
IsActive: false,
@@ -681,7 +681,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler,
scalerName = scalerConfig.TriggerName
}
- metricSpecs, err := cache.GetMetricSpecForScalingForScaler(ctx, scalerIndex)
+ metricSpecs, err := cache.GetMetricSpecForScalingForScaler(ctx, triggerIndex)
if err != nil {
result.IsError = true
logger.Error(err, "error getting metric spec for the scaler", "scaler", scalerName)
@@ -696,9 +696,9 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler,
metricName := spec.External.Metric.Name
var latency int64
- metrics, isMetricActive, latency, err := cache.GetMetricsAndActivityForScaler(ctx, scalerIndex, metricName)
+ metrics, isMetricActive, latency, err := cache.GetMetricsAndActivityForScaler(ctx, triggerIndex, metricName)
if latency != -1 {
- metricscollector.RecordScalerLatency(scaledObject.Namespace, scaledObject.Name, scalerName, scalerIndex, metricName, float64(latency))
+ metricscollector.RecordScalerLatency(scaledObject.Namespace, scaledObject.Name, scalerName, triggerIndex, metricName, float64(latency))
}
result.Metrics = append(result.Metrics, metrics...)
logger.V(1).Info("Getting metrics and activity from scaler", "scaler", scalerName, "metricName", metricName, "metrics", metrics, "activity", isMetricActive, "scalerError", err)
@@ -724,7 +724,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler,
result.IsActive = isMetricActive
for _, metric := range metrics {
metricValue := metric.Value.AsApproximateFloat64()
- metricscollector.RecordScalerMetric(scaledObject.Namespace, scaledObject.Name, scalerName, scalerIndex, metric.MetricName, metricValue)
+ metricscollector.RecordScalerMetric(scaledObject.Namespace, scaledObject.Name, scalerName, triggerIndex, metric.MetricName, metricValue)
}
if !scaledObject.IsUsingModifiers() {
if isMetricActive {
@@ -735,7 +735,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler,
logger.V(1).Info("Scaler for scaledObject is active", "scaler", scalerName, "metricName", spec.Resource.Name)
}
}
- metricscollector.RecordScalerActive(scaledObject.Namespace, scaledObject.Name, scalerName, scalerIndex, metricName, isMetricActive)
+ metricscollector.RecordScalerActive(scaledObject.Namespace, scaledObject.Name, scalerName, triggerIndex, metricName, isMetricActive)
}
}
diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go
index 68402d6fe50..b3f39e5eddf 100644
--- a/pkg/scaling/scale_handler_test.go
+++ b/pkg/scaling/scale_handler_test.go
@@ -771,8 +771,8 @@ func TestScalingModifiersFormula(t *testing.T) {
scaler1 := mock_scalers.NewMockScaler(ctrl)
scaler2 := mock_scalers.NewMockScaler(ctrl)
// don't use cached metrics
- scalerConfig1 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName1, ScalerIndex: 0}
- scalerConfig2 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName2, ScalerIndex: 1}
+ scalerConfig1 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName1, TriggerIndex: 0}
+ scalerConfig2 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName2, TriggerIndex: 1}
factory1 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
return scaler1, &scalerConfig1, nil
}
diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go
index 19af188a264..3d3f63d057c 100644
--- a/pkg/scaling/scalers_builder.go
+++ b/pkg/scaling/scalers_builder.go
@@ -62,12 +62,26 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp
ResolvedEnv: resolvedEnv,
AuthParams: make(map[string]string),
GlobalHTTPTimeout: h.globalHTTPTimeout,
- ScalerIndex: triggerIndex,
+ TriggerIndex: triggerIndex,
MetricType: trigger.MetricType,
AsMetricSource: asMetricSource,
+ TriggerUniqueKey: fmt.Sprintf("%s-%s-%s-%d", withTriggers.Kind, withTriggers.Namespace, withTriggers.Name, triggerIndex),
}
authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace, h.secretsLister)
+ switch podIdentity.Provider {
+ case kedav1alpha1.PodIdentityProviderAzure:
+ // FIXME: Delete this for v2.15
+ logger.Info("WARNING: Azure AD Pod Identity has been archived (https://github.com/Azure/aad-pod-identity#-announcement) and will be removed from KEDA on v2.15")
+ case kedav1alpha1.PodIdentityProviderAwsKiam:
+ // FIXME: Delete this for v2.15
+ logger.Info("WARNING: AWS Kiam Identity has been abandoned (https://github.com/uswitch/kiam) and will be removed from KEDA on v2.15")
+ case kedav1alpha1.PodIdentityProviderAwsEKS:
+ // FIXME: Delete this for v3
+ logger.Info("WARNING: AWS EKS Identity has been deprecated in favor of AWS Identity and will be removed from KEDA on v3")
+ default:
+ }
+
if err != nil {
return nil, nil, err
}
@@ -77,10 +91,11 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp
return scaler, config, err
}
+ // nosemgrep: invalid-usage-of-modified-variable
scaler, config, err := factory()
if err != nil {
h.recorder.Event(withTriggers, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error())
- logger.Error(err, "error resolving auth params", "scalerIndex", triggerIndex)
+ logger.Error(err, "error resolving auth params", "triggerIndex", triggerIndex)
if scaler != nil {
scaler.Close(ctx)
}
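The TriggerUniqueKey built above is simply the resource kind, namespace, name and trigger index joined with dashes, which keeps it stable across reconciliations and usable as a cache key (e.g. for the AWS credentials cache). A quick illustration of the resulting format, with hypothetical values:

package main

import "fmt"

func main() {
	// Hypothetical ScaledObject "my-app" in namespace "default", first trigger.
	key := fmt.Sprintf("%s-%s-%s-%d", "ScaledObject", "default", "my-app", 0)
	fmt.Println(key) // ScaledObject-default-my-app-0
}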
diff --git a/tests/run-all.go b/tests/run-all.go
index 3c60c2c8e98..bf9029c5ff4 100644
--- a/tests/run-all.go
+++ b/tests/run-all.go
@@ -354,6 +354,7 @@ func printKedaLogs() {
fmt.Println(operatorLogs)
fmt.Println("##############################################")
fmt.Println("##############################################")
+ saveLogToFile("keda-operator.log", operatorLogs)
}
msLogs, err := helper.FindPodLogs(kubeClient, "keda", "app=keda-metrics-apiserver", true)
@@ -362,6 +363,7 @@ func printKedaLogs() {
fmt.Println(msLogs)
fmt.Println("##############################################")
fmt.Println("##############################################")
+ saveLogToFile("keda-metrics-server.log", msLogs)
}
hooksLogs, err := helper.FindPodLogs(kubeClient, "keda", "app=keda-admission-webhooks", true)
@@ -370,5 +372,21 @@ func printKedaLogs() {
fmt.Println(hooksLogs)
fmt.Println("##############################################")
fmt.Println("##############################################")
+ saveLogToFile("keda-webhooks.log", hooksLogs)
+ }
+}
+
+func saveLogToFile(file string, lines []string) {
+ f, err := os.Create(file)
+ if err != nil {
+ fmt.Print(err)
+ return
+ }
+ defer f.Close()
+ for _, line := range lines {
+ _, err := f.WriteString(line + "\n")
+ if err != nil {
+ fmt.Print(err)
+ }
}
}
diff --git a/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go b/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go
index 04697f58af3..12bba970c6a 100644
--- a/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go
+++ b/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go
@@ -51,7 +51,7 @@ metadata:
namespace: {{.TestNamespace}}
spec:
podIdentity:
- provider: aws-eks
+ provider: aws
`
deploymentTemplate = `
diff --git a/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go b/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go
new file mode 100644
index 00000000000..dd372b71adb
--- /dev/null
+++ b/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go
@@ -0,0 +1,225 @@
+//go:build e2e
+// +build e2e
+
+package aws_cloudwatch_pod_identity_eks_test
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/cloudwatch"
+ "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-cloudwatch-pod-identity-eks-test"
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObjectName string
+ SecretName string
+ AwsAccessKeyID string
+ AwsSecretAccessKey string
+ AwsRegion string
+ CloudWatchMetricName string
+ CloudWatchMetricNamespace string
+ CloudWatchMetricDimensionName string
+ CloudWatchMetricDimensionValue string
+}
+
+const (
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: keda-trigger-auth-aws-credentials
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws-eks
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 2
+ minReplicaCount: 0
+ cooldownPeriod: 1
+ triggers:
+ - type: aws-cloudwatch
+ authenticationRef:
+ name: keda-trigger-auth-aws-credentials
+ metadata:
+ awsRegion: {{.AwsRegion}}
+ namespace: {{.CloudWatchMetricNamespace}}
+ dimensionName: {{.CloudWatchMetricDimensionName}}
+ dimensionValue: {{.CloudWatchMetricDimensionValue}}
+ metricName: {{.CloudWatchMetricName}}
+ targetMetricValue: "1"
+ activationTargetMetricValue: "5"
+ minMetricValue: "0"
+ metricCollectionTime: "120"
+ metricStatPeriod: "30"
+ identityOwner: operator
+`
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ cloudwatchMetricName = fmt.Sprintf("cw-identity-%d", GetRandomNumber())
+ awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY")
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ cloudwatchMetricNamespace = "KEDA"
+ cloudwatchMetricDimensionName = "dimensionName"
+ cloudwatchMetricDimensionValue = "dimensionValue"
+ maxReplicaCount = 2
+ minReplicaCount = 0
+)
+
+func TestCloudWatchScaler(t *testing.T) {
+ // setup cloudwatch
+ cloudwatchClient := createCloudWatchClient()
+ setCloudWatchCustomMetric(t, cloudwatchClient, 0)
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+ "replica count should be %d after 1 minute", minReplicaCount)
+
+ // test scaling
+ testActivation(t, kc, cloudwatchClient)
+ testScaleOut(t, kc, cloudwatchClient)
+ testScaleIn(t, kc, cloudwatchClient)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+
+ setCloudWatchCustomMetric(t, cloudwatchClient, 0)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) {
+ t.Log("--- testing activation ---")
+ setCloudWatchCustomMetric(t, cloudwatchClient, 3)
+
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) {
+ t.Log("--- testing scale out ---")
+ setCloudWatchCustomMetric(t, cloudwatchClient, 10)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) {
+ t.Log("--- testing scale in ---")
+
+ setCloudWatchCustomMetric(t, cloudwatchClient, 0)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", minReplicaCount)
+}
+
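+// setCloudWatchCustomMetric publishes a single datum for the test metric so the
+// scaler sees the desired value on its next metric query.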
+func setCloudWatchCustomMetric(t *testing.T, cloudwatchClient *cloudwatch.Client, value float64) {
+ _, err := cloudwatchClient.PutMetricData(context.Background(), &cloudwatch.PutMetricDataInput{
+ MetricData: []types.MetricDatum{
+ {
+ MetricName: aws.String(cloudwatchMetricName),
+ Dimensions: []types.Dimension{
+ {
+ Name: aws.String(cloudwatchMetricDimensionName),
+ Value: aws.String(cloudwatchMetricDimensionValue),
+ },
+ },
+ Unit: types.StandardUnitNone,
+ Value: aws.Float64(value),
+ },
+ },
+ Namespace: aws.String(cloudwatchMetricNamespace),
+ })
+ assert.NoErrorf(t, err, "failed to set cloudwatch metric - %s", err)
+}
+
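+// createCloudWatchClient builds a CloudWatch client for the test region using
+// static credentials taken from the TF_AWS_* environment variables.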
+func createCloudWatchClient() *cloudwatch.Client {
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "")
+ return cloudwatch.NewFromConfig(cfg)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ ScaledObjectName: scaledObjectName,
+ SecretName: secretName,
+ AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)),
+ AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)),
+ AwsRegion: awsRegion,
+ CloudWatchMetricName: cloudwatchMetricName,
+ CloudWatchMetricNamespace: cloudwatchMetricNamespace,
+ CloudWatchMetricDimensionName: cloudwatchMetricDimensionName,
+ CloudWatchMetricDimensionValue: cloudwatchMetricDimensionValue,
+ }, []Template{
+ {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+ }
+}
diff --git a/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go b/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go
index ab5cc6cd7d5..aea9e215e76 100644
--- a/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go
+++ b/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go
@@ -53,7 +53,7 @@ metadata:
namespace: {{.TestNamespace}}
spec:
podIdentity:
- provider: aws-eks
+ provider: aws
`
deploymentTemplate = `
diff --git a/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go b/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go
new file mode 100644
index 00000000000..70a9c43a27b
--- /dev/null
+++ b/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go
@@ -0,0 +1,277 @@
+//go:build e2e
+// +build e2e
+
+package aws_dynamodb_pod_identity_eks_test
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-dynamodb-pod-identity-eks-test"
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObjectName string
+ SecretName string
+ AwsAccessKeyID string
+ AwsSecretAccessKey string
+ AwsRegion string
+ DynamoDBTableName string
+ ExpressionAttributeNames string
+ KeyConditionExpression string
+ ExpressionAttributeValues string
+}
+
+const (
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: keda-trigger-auth-aws-credentials
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws-eks
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 2
+ minReplicaCount: 0
+ cooldownPeriod: 1
+ triggers:
+ - type: aws-dynamodb
+ authenticationRef:
+ name: keda-trigger-auth-aws-credentials
+ metadata:
+ awsRegion: {{.AwsRegion}}
+ tableName: {{.DynamoDBTableName}}
+ expressionAttributeNames: '{{.ExpressionAttributeNames}}'
+ keyConditionExpression: '{{.KeyConditionExpression}}'
+ expressionAttributeValues: '{{.ExpressionAttributeValues}}'
+ targetValue: '1'
+ activationTargetValue: '4'
+ identityOwner: operator
+`
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ dynamoDBTableName = fmt.Sprintf("table-identity-%d", GetRandomNumber())
+ awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY")
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ expressionAttributeNames = "{ \"#k\" : \"event_type\"}"
+ keyConditionExpression = "#k = :key"
+ expressionAttributeValues = "{ \":key\" : {\"S\":\"scaling_event\"}}"
+ maxReplicaCount = 2
+ minReplicaCount = 0
+)
+
+func TestDynamoDBScaler(t *testing.T) {
+ // setup dynamodb
+ dynamodbClient := createDynamoDBClient()
+ createDynamoDBTable(t, dynamodbClient)
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+ "replica count should be %d after 1 minute", minReplicaCount)
+
+ // test scaling
+ testActivation(t, kc, dynamodbClient)
+ testScaleOut(t, kc, dynamodbClient)
+ testScaleIn(t, kc, dynamodbClient)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ cleanupTable(t, dynamodbClient)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) {
+ t.Log("--- testing activation ---")
+ addMessages(t, dynamodbClient, 3)
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) {
+ t.Log("--- testing scale out ---")
+ addMessages(t, dynamodbClient, 6)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) {
+ t.Log("--- testing scale in ---")
+
+ for i := 0; i < 6; i++ {
+ _, err := dynamodbClient.DeleteItem(context.Background(), &dynamodb.DeleteItemInput{
+ TableName: aws.String(dynamoDBTableName),
+ Key: map[string]types.AttributeValue{
+ "event_type": &types.AttributeValueMemberS{
+ Value: "scaling_event",
+ },
+ "event_id": &types.AttributeValueMemberS{
+ Value: strconv.Itoa(i),
+ },
+ },
+ })
+ assert.NoErrorf(t, err, "failed to delete item - %s", err)
+ }
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", minReplicaCount)
+}
+
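+// addMessages writes the requested number of items under the "scaling_event"
+// hash key; the scaler's query counts these items as its metric value.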
+func addMessages(t *testing.T, dynamodbClient *dynamodb.Client, messages int) {
+ for i := 0; i < messages; i++ {
+ _, err := dynamodbClient.PutItem(context.Background(), &dynamodb.PutItemInput{
+ TableName: aws.String(dynamoDBTableName),
+ Item: map[string]types.AttributeValue{
+ "event_type": &types.AttributeValueMemberS{
+ Value: "scaling_event",
+ },
+ "event_id": &types.AttributeValueMemberS{
+ Value: strconv.Itoa(i),
+ },
+ },
+ })
+		assert.NoErrorf(t, err, "failed to create item - %s", err)
+		t.Log("Message enqueued")
+ }
+}
+
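+// createDynamoDBTable provisions the test table with a composite key and blocks
+// until it reports ACTIVE.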
+func createDynamoDBTable(t *testing.T, dynamodbClient *dynamodb.Client) {
+ _, err := dynamodbClient.CreateTable(context.Background(), &dynamodb.CreateTableInput{
+ TableName: aws.String(dynamoDBTableName),
+ KeySchema: []types.KeySchemaElement{
+ {AttributeName: aws.String("event_type"), KeyType: types.KeyTypeHash},
+ {AttributeName: aws.String("event_id"), KeyType: types.KeyTypeRange},
+ },
+ AttributeDefinitions: []types.AttributeDefinition{
+ {AttributeName: aws.String("event_type"), AttributeType: types.ScalarAttributeTypeS},
+ {AttributeName: aws.String("event_id"), AttributeType: types.ScalarAttributeTypeS},
+ },
+ ProvisionedThroughput: &types.ProvisionedThroughput{
+ ReadCapacityUnits: aws.Int64(5),
+ WriteCapacityUnits: aws.Int64(5),
+ },
+ })
+ assert.NoErrorf(t, err, "failed to create table - %s", err)
+	done := waitForTableActiveStatus(t, dynamodbClient)
+	assert.True(t, done, "dynamodb table didn't reach ACTIVE status")
+}
+
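+// waitForTableActiveStatus polls DescribeTable for up to ~60 seconds until the
+// table reports ACTIVE.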
+func waitForTableActiveStatus(t *testing.T, dynamodbClient *dynamodb.Client) bool {
+ for i := 0; i < 30; i++ {
+		describe, err := dynamodbClient.DescribeTable(context.Background(), &dynamodb.DescribeTableInput{
+			TableName: aws.String(dynamoDBTableName),
+		})
+		if err == nil {
+			t.Logf("Waiting for table ACTIVE status. current status - %s", describe.Table.TableStatus)
+			if describe.Table.TableStatus == "ACTIVE" {
+				return true
+			}
+		}
+		time.Sleep(time.Second * 2)
+ }
+ return false
+}
+
+func cleanupTable(t *testing.T, dynamodbClient *dynamodb.Client) {
+ t.Log("--- cleaning up ---")
+ _, err := dynamodbClient.DeleteTable(context.Background(), &dynamodb.DeleteTableInput{
+ TableName: aws.String(dynamoDBTableName),
+ })
+	assert.NoErrorf(t, err, "cannot delete table - %s", err)
+}
+
+func createDynamoDBClient() *dynamodb.Client {
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "")
+ return dynamodb.NewFromConfig(cfg)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ ScaledObjectName: scaledObjectName,
+ SecretName: secretName,
+ AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)),
+ AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)),
+ AwsRegion: awsRegion,
+ DynamoDBTableName: dynamoDBTableName,
+ ExpressionAttributeNames: expressionAttributeNames,
+ KeyConditionExpression: keyConditionExpression,
+ ExpressionAttributeValues: expressionAttributeValues,
+ }, []Template{
+ {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+ }
+}
diff --git a/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go b/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go
index f6e937f62ff..ad2b93db98b 100644
--- a/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go
+++ b/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go
@@ -91,7 +91,7 @@ metadata:
namespace: {{.TestNamespace}}
spec:
podIdentity:
- provider: aws-eks
+ provider: aws
`
scaledObjectTemplate = `
diff --git a/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go b/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go
new file mode 100644
index 00000000000..3d000d0db1d
--- /dev/null
+++ b/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go
@@ -0,0 +1,294 @@
+//go:build e2e
+// +build e2e
+
+package aws_dynamodb_streams_pod_identity_eks_test
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ dynamodbTypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-dynamodb-streams-pod-identity-eks-test"
+)
+
+var (
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ awsAccessKey = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretKey = os.Getenv("TF_AWS_SECRET_KEY")
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ triggerAuthName = fmt.Sprintf("%s-ta", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ tableName = fmt.Sprintf("stream-identity-%d", GetRandomNumber())
+ shardCount = 2 // default count
+ activationShardCount = 0 // default count
+)
+
+type templateData struct {
+ TestNamespace string
+ SecretName string
+ AwsRegion string
+ AwsAccessKey string
+ AwsSecretKey string
+ DeploymentName string
+ TriggerAuthName string
+ ScaledObjectName string
+ TableName string
+ ShardCount int64
+ ActivationShardCount int64
+}
+
+const (
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+`
+
+ triggerAuthTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws-eks
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ deploymentName: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 2
+ minReplicaCount: 0
+ pollingInterval: 5 # Optional. Default: 30 seconds
+ cooldownPeriod: 1 # Optional. Default: 300 seconds
+ triggers:
+ - type: aws-dynamodb-streams
+ authenticationRef:
+ name: {{.TriggerAuthName}}
+ metadata:
+ awsRegion: {{.AwsRegion}} # Required
+ tableName: {{.TableName}} # Required
+ shardCount: "{{.ShardCount}}" # Optional. Default: 2
+ activationShardCount: "{{.ActivationShardCount}}" # Optional. Default: 0
+ identityOwner: operator
+`
+)
+
+func TestScaler(t *testing.T) {
+ t.Log("--- setting up ---")
+	require.NotEmpty(t, awsAccessKey, "TF_AWS_ACCESS_KEY env variable is required for dynamodb streams tests")
+	require.NotEmpty(t, awsSecretKey, "TF_AWS_SECRET_KEY env variable is required for dynamodb streams tests")
+ data, templates := getTemplateData()
+
+ // Create DynamoDB table and the latest stream Arn for the table
+ dbClient, dbStreamsClient := setupDynamoDBStreams(t)
+ streamArn, err := getLatestStreamArn(dbClient)
+ assert.NoErrorf(t, err, "cannot get latest stream arn for the table - %s", err)
+ time.Sleep(10 * time.Second)
+
+ // Get Shard Count
+ shardCount, err := getDynamoDBStreamShardCount(dbStreamsClient, streamArn)
+ assert.True(t, shardCount >= 2, "dynamodb stream shard count should be 2 or higher - %s", err)
+
+ // Deploy nginx, secret, and triggerAuth
+ kc := GetKubernetesClient(t)
+ CreateNamespace(t, kc, testNamespace)
+ KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate)
+ KubectlApplyWithTemplate(t, data, "triggerAuthTemplate", triggerAuthTemplate)
+
+ // Wait for nginx to load
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 30, 3),
+ "replica count should start out as 0")
+
+ // test scaling
+ testActivation(t, kc, data)
+ testScaleOut(t, kc, data, shardCount)
+ testScaleIn(t, kc, data, shardCount)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ cleanupDynamoDBTable(t, dbClient)
+}
+
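+// setupDynamoDBStreams builds the DynamoDB and DynamoDB Streams clients with
+// static credentials and creates the test table.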
+func setupDynamoDBStreams(t *testing.T) (*dynamodb.Client, *dynamodbstreams.Client) {
+ var dbClient *dynamodb.Client
+ var dbStreamClient *dynamodbstreams.Client
+
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKey, awsSecretKey, "")
+
+ dbClient = dynamodb.NewFromConfig(cfg)
+ dbStreamClient = dynamodbstreams.NewFromConfig(cfg)
+
+ err := createTable(dbClient)
+ assert.NoErrorf(t, err, "cannot create dynamodb table - %s", err)
+
+ return dbClient, dbStreamClient
+}
+
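+// createTable provisions a pay-per-request table with a NEW_IMAGE stream
+// enabled, which is what exposes shards for the scaler to count.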
+func createTable(db *dynamodb.Client) error {
+ keySchema := []dynamodbTypes.KeySchemaElement{
+ {
+ AttributeName: aws.String("id"),
+ KeyType: dynamodbTypes.KeyTypeHash,
+ },
+ }
+ attributeDefinitions := []dynamodbTypes.AttributeDefinition{
+ {
+ AttributeName: aws.String("id"),
+ AttributeType: dynamodbTypes.ScalarAttributeTypeS,
+ },
+ }
+ streamSpecification := &dynamodbTypes.StreamSpecification{
+ StreamEnabled: aws.Bool(true),
+ StreamViewType: dynamodbTypes.StreamViewTypeNewImage,
+ }
+ _, err := db.CreateTable(context.Background(), &dynamodb.CreateTableInput{
+ TableName: &tableName,
+ KeySchema: keySchema,
+ AttributeDefinitions: attributeDefinitions,
+ BillingMode: dynamodbTypes.BillingModePayPerRequest,
+ StreamSpecification: streamSpecification,
+ })
+ return err
+}
+
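+// getLatestStreamArn returns the ARN of the stream that DynamoDB attached to
+// the table when it was created with streams enabled.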
+func getLatestStreamArn(db *dynamodb.Client) (*string, error) {
+ input := dynamodb.DescribeTableInput{
+ TableName: &tableName,
+ }
+ tableInfo, err := db.DescribeTable(context.Background(), &input)
+ if err != nil {
+ return nil, err
+ }
+	if tableInfo.Table.LatestStreamArn == nil {
+ return nil, errors.New("empty table stream arn")
+ }
+ return tableInfo.Table.LatestStreamArn, nil
+}
+
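+// getDynamoDBStreamShardCount returns the number of shards reported by
+// DescribeStream for the given stream.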
+func getDynamoDBStreamShardCount(dbs *dynamodbstreams.Client, streamArn *string) (int64, error) {
+ input := dynamodbstreams.DescribeStreamInput{
+ StreamArn: streamArn,
+ }
+ des, err := dbs.DescribeStream(context.Background(), &input)
+ if err != nil {
+ return -1, err
+ }
+ return int64(len(des.StreamDescription.Shards)), nil
+}
+
+func getTemplateData() (templateData, []Template) {
+ base64AwsAccessKey := base64.StdEncoding.EncodeToString([]byte(awsAccessKey))
+ base64AwsSecretKey := base64.StdEncoding.EncodeToString([]byte(awsSecretKey))
+
+ return templateData{
+ TestNamespace: testNamespace,
+ SecretName: secretName,
+ AwsRegion: awsRegion,
+ AwsAccessKey: base64AwsAccessKey,
+ AwsSecretKey: base64AwsSecretKey,
+ DeploymentName: deploymentName,
+ TriggerAuthName: triggerAuthName,
+ ScaledObjectName: scaledObjectName,
+ TableName: tableName,
+ ShardCount: int64(shardCount),
+ }, []Template{
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "triggerAuthTemplate", Config: triggerAuthTemplate},
+ {Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+ }
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testing activation ---")
+ data.ActivationShardCount = 10
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData, shardCount int64) {
+ t.Log("--- testing scale out ---")
+ // Deploy scalerObject with its target shardCount = the current dynamodb streams shard count and check if replicas scale out to 1
+ t.Log("replicas should scale out to 1")
+ data.ShardCount = shardCount
+ data.ActivationShardCount = int64(activationShardCount)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 180, 1),
+ "replica count should increase to 1")
+
+ // Deploy scalerObject with its shardCount = 1 and check if replicas scale out to 2 (maxReplicaCount)
+ t.Log("then, replicas should scale out to 2")
+ data.ShardCount = 1
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 180, 1),
+ "replica count should increase to 2")
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData, shardCount int64) {
+ t.Log("--- testing scale in ---")
+ // Deploy scalerObject with its target shardCount = the current dynamodb streams shard count and check if replicas scale in to 1
+ data.ShardCount = shardCount
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 330, 1),
+ "replica count should decrease to 1 in 330 seconds")
+}
+
+func cleanupDynamoDBTable(t *testing.T, db *dynamodb.Client) {
+ t.Log("--- cleaning up ---")
+ _, err := db.DeleteTable(context.Background(),
+ &dynamodb.DeleteTableInput{
+ TableName: &tableName,
+ })
+ assert.NoErrorf(t, err, "cannot delete dynamodb table - %s", err)
+}
diff --git a/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go b/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go
index 69bfd38de08..49c3fde5c26 100644
--- a/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go
+++ b/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go
@@ -49,7 +49,7 @@ metadata:
namespace: {{.TestNamespace}}
spec:
podIdentity:
- provider: aws-eks
+ provider: aws
`
deploymentTemplate = `
diff --git a/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go b/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go
new file mode 100644
index 00000000000..245770f98e6
--- /dev/null
+++ b/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go
@@ -0,0 +1,239 @@
+//go:build e2e
+// +build e2e
+
+package aws_kinesis_stream_pod_identity_eks_test
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-kinesis-stream-pod-identity-eks-test"
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObjectName string
+ SecretName string
+ AwsAccessKeyID string
+ AwsSecretAccessKey string
+ AwsRegion string
+ KinesisStream string
+}
+
+const (
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: keda-trigger-auth-aws-credentials
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws-eks
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 2
+ minReplicaCount: 0
+ cooldownPeriod: 1
+ advanced:
+ horizontalPodAutoscalerConfig:
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 15
+ triggers:
+ - type: aws-kinesis-stream
+ authenticationRef:
+ name: keda-trigger-auth-aws-credentials
+ metadata:
+ awsRegion: {{.AwsRegion}}
+ streamName: {{.KinesisStream}}
+ shardCount: "3"
+ activationShardCount: "4"
+ identityOwner: operator
+`
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ kinesisStreamName = fmt.Sprintf("kinesis-identity-%d", GetRandomNumber())
+ awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY")
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ maxReplicaCount = 2
+ minReplicaCount = 0
+)
+
+func TestKinesisScaler(t *testing.T) {
+ // setup kinesis
+ kinesisClient := createKinesisClient()
+ createKinesisStream(t, kinesisClient)
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+ "replica count should be %d after 1 minute", minReplicaCount)
+
+ // test scaling
+ testActivation(t, kc, kinesisClient)
+ testScaleOut(t, kc, kinesisClient)
+ testScaleIn(t, kc, kinesisClient)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ cleanupStream(t, kinesisClient)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) {
+ t.Log("--- testing activation ---")
+ updateShardCount(t, kinesisClient, 3)
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) {
+ t.Log("--- testing scale out ---")
+ updateShardCount(t, kinesisClient, 6)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) {
+ t.Log("--- testing scale in ---")
+ updateShardCount(t, kinesisClient, 3)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+ "replica count should be %d after 3 minutes", minReplicaCount)
+}
+
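+// updateShardCount waits for the stream to become ACTIVE and then resizes it
+// to the target shard count.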
+func updateShardCount(t *testing.T, kinesisClient *kinesis.Client, shardCount int64) {
+	done := waitForStreamActiveStatus(t, kinesisClient)
+	assert.True(t, done, "stream should be ACTIVE before updating the shard count")
+	if done {
+		_, err := kinesisClient.UpdateShardCount(context.Background(), &kinesis.UpdateShardCountInput{
+			StreamName:       &kinesisStreamName,
+			TargetShardCount: aws.Int32(int32(shardCount)),
+			ScalingType:      types.ScalingTypeUniformScaling,
+		})
+		assert.NoErrorf(t, err, "cannot update shard count - %s", err)
+	}
+}
+
+func createKinesisStream(t *testing.T, kinesisClient *kinesis.Client) {
+ _, err := kinesisClient.CreateStream(context.Background(), &kinesis.CreateStreamInput{
+ StreamName: &kinesisStreamName,
+ ShardCount: aws.Int32(2),
+ })
+ assert.NoErrorf(t, err, "failed to create stream - %s", err)
+	done := waitForStreamActiveStatus(t, kinesisClient)
+	assert.True(t, done, "kinesis stream didn't reach ACTIVE status")
+}
+
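+// waitForStreamActiveStatus polls DescribeStream for up to ~60 seconds until
+// the stream reports ACTIVE.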
+func waitForStreamActiveStatus(t *testing.T, kinesisClient *kinesis.Client) bool {
+ for i := 0; i < 30; i++ {
+		describe, err := kinesisClient.DescribeStream(context.Background(), &kinesis.DescribeStreamInput{
+			StreamName: &kinesisStreamName,
+		})
+		if err == nil {
+			t.Logf("Waiting for stream ACTIVE status. current status - %s", describe.StreamDescription.StreamStatus)
+			if describe.StreamDescription.StreamStatus == "ACTIVE" {
+				return true
+			}
+		}
+		time.Sleep(time.Second * 2)
+ }
+ return false
+}
+
+func cleanupStream(t *testing.T, kinesisClient *kinesis.Client) {
+ t.Log("--- cleaning up ---")
+ _, err := kinesisClient.DeleteStream(context.Background(), &kinesis.DeleteStreamInput{
+ StreamName: &kinesisStreamName,
+ })
+ assert.NoErrorf(t, err, "cannot delete stream - %s", err)
+}
+
+func createKinesisClient() *kinesis.Client {
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "")
+ return kinesis.NewFromConfig(cfg)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ ScaledObjectName: scaledObjectName,
+ SecretName: secretName,
+ AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)),
+ AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)),
+ AwsRegion: awsRegion,
+ KinesisStream: kinesisStreamName,
+ }, []Template{
+ {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+ }
+}
diff --git a/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go b/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go
index 08f2a0b4dab..a47c20989a4 100644
--- a/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go
+++ b/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go
@@ -47,7 +47,7 @@ metadata:
namespace: {{.TestNamespace}}
spec:
podIdentity:
- provider: aws-eks
+ provider: aws
`
deploymentTemplate = `
diff --git a/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go b/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go
new file mode 100644
index 00000000000..0288c0a169f
--- /dev/null
+++ b/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go
@@ -0,0 +1,219 @@
+//go:build e2e
+// +build e2e
+
+package aws_sqs_queue_pod_identity_eks_test
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-sqs-queue-pod-identity-eks-test"
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObjectName string
+ SecretName string
+ AwsAccessKeyID string
+ AwsSecretAccessKey string
+ AwsRegion string
+ SqsQueue string
+}
+
+const (
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: keda-trigger-auth-aws-credentials
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws-eks
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 2
+ minReplicaCount: 0
+ cooldownPeriod: 1
+ triggers:
+ - type: aws-sqs-queue
+ authenticationRef:
+ name: keda-trigger-auth-aws-credentials
+ metadata:
+ awsRegion: {{.AwsRegion}}
+ queueURL: {{.SqsQueue}}
+ queueLength: "1"
+ activationQueueLength: "5"
+ identityOwner: operator
+`
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ sqsQueueName = fmt.Sprintf("queue-identity-%d", GetRandomNumber())
+ awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY")
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ maxReplicaCount = 2
+ minReplicaCount = 0
+)
+
+func TestSqsScaler(t *testing.T) {
+ // setup SQS
+ sqsClient := createSqsClient()
+ queue := createSqsQueue(t, sqsClient)
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData(*queue.QueueUrl)
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+ "replica count should be 0 after 1 minute")
+
+ // test scaling
+ testActivation(t, kc, sqsClient, queue.QueueUrl)
+ testScaleOut(t, kc, sqsClient, queue.QueueUrl)
+ testScaleIn(t, kc, sqsClient, queue.QueueUrl)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ cleanupQueue(t, sqsClient, queue.QueueUrl)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) {
+ t.Log("--- testing activation ---")
+ addMessages(t, sqsClient, queueURL, 4)
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) {
+ t.Log("--- testing scale out ---")
+ addMessages(t, sqsClient, queueURL, 6)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+ "replica count should be 2 after 3 minutes")
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) {
+ t.Log("--- testing scale in ---")
+ _, err := sqsClient.PurgeQueue(context.Background(), &sqs.PurgeQueueInput{
+ QueueUrl: queueURL,
+ })
+ assert.NoErrorf(t, err, "cannot clear queue - %s", err)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 1),
+ "replica count should be 0 after 3 minutes")
+}
+
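+// addMessages enqueues the given number of messages; each message is delayed
+// 10 seconds before it becomes visible on the queue.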
+func addMessages(t *testing.T, sqsClient *sqs.Client, queueURL *string, messages int) {
+ for i := 0; i < messages; i++ {
+ msg := fmt.Sprintf("Message - %d", i)
+ _, err := sqsClient.SendMessage(context.Background(), &sqs.SendMessageInput{
+ QueueUrl: queueURL,
+ MessageBody: aws.String(msg),
+ DelaySeconds: 10,
+ })
+ assert.NoErrorf(t, err, "cannot send message - %s", err)
+ }
+}
+
+func createSqsQueue(t *testing.T, sqsClient *sqs.Client) *sqs.CreateQueueOutput {
+ queue, err := sqsClient.CreateQueue(context.Background(), &sqs.CreateQueueInput{
+ QueueName: &sqsQueueName,
+ Attributes: map[string]string{
+ "DelaySeconds": "60",
+ "MessageRetentionPeriod": "86400",
+ }})
+ assert.NoErrorf(t, err, "failed to create queue - %s", err)
+ return queue
+}
+
+func cleanupQueue(t *testing.T, sqsClient *sqs.Client, queueURL *string) {
+ t.Log("--- cleaning up ---")
+ _, err := sqsClient.DeleteQueue(context.Background(), &sqs.DeleteQueueInput{
+ QueueUrl: queueURL,
+ })
+ assert.NoErrorf(t, err, "cannot delete queue - %s", err)
+}
+
+func createSqsClient() *sqs.Client {
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "")
+ return sqs.NewFromConfig(cfg)
+}
+
+func getTemplateData(sqsQueue string) (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ ScaledObjectName: scaledObjectName,
+ SecretName: secretName,
+ AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)),
+ AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)),
+ AwsRegion: awsRegion,
+ SqsQueue: sqsQueue,
+ }, []Template{
+ {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+ }
+}
diff --git a/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go b/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go
new file mode 100644
index 00000000000..98bd66c8508
--- /dev/null
+++ b/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go
@@ -0,0 +1,327 @@
+//go:build e2e
+// +build e2e
+
+package aws_identity_assume_role_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "aws-identity-assume-role-test"
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObjectName string
+ TriggerAuthenticationName string
+ SecretName string
+ AwsRegion string
+ RoleArn string
+ SqsQueue string
+}
+
+const (
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthenticationName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws
+`
+
+ triggerAuthTemplateWithRoleArn = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthenticationName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws
+ roleArn: {{.RoleArn}}
+`
+
+ triggerAuthTemplateWithIdentityOwner = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthenticationName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: aws
+ identityOwner: workload
+`
+
+ serviceAccountTemplate = `apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: workload
+ namespace: {{.TestNamespace}}
+ annotations:
+ eks.amazonaws.com/role-arn: {{.RoleArn}}
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ serviceAccountName: workload
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ maxReplicaCount: 1
+ minReplicaCount: 0
+ pollingInterval: 5
+ cooldownPeriod: 1
+ triggers:
+ - type: aws-sqs-queue
+ authenticationRef:
+ name: {{.TriggerAuthenticationName}}
+ metadata:
+ awsRegion: {{.AwsRegion}}
+ queueURL: {{.SqsQueue}}
+ queueLength: "1"
+`
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ sqsWorkload1QueueName = fmt.Sprintf("assume-role-workload1-queue-%d", GetRandomNumber())
+ sqsWorkload2QueueName = fmt.Sprintf("assume-role-workload2-queue-%d", GetRandomNumber())
+ awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY")
+ awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY")
+ awsRegion = os.Getenv("TF_AWS_REGION")
+ awsWorkload1RoleArn = os.Getenv("TF_AWS_WORKLOAD1_ROLE")
+ awsWorkload2RoleArn = os.Getenv("TF_AWS_WORKLOAD2_ROLE")
+ maxReplicaCount = 1
+ minReplicaCount = 0
+ sqsMessageCount = 2
+)
+
+func TestSqsScaler(t *testing.T) {
+ // setup SQS
+ sqsClient := createSqsClient()
+ queueWorkload1 := createSqsQueue(t, sqsWorkload1QueueName, sqsClient)
+ queueWorkload2 := createSqsQueue(t, sqsWorkload2QueueName, sqsClient)
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData(*queueWorkload1.QueueUrl)
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+ "replica count should be 0 after 1 minute")
+
+ // test scaling using KEDA identity
+ testScaleWithKEDAIdentity(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+ // test scaling using correct identity provided via podIdentity.RoleArn
+ // for a role that can be assumed
+	testScaleWithExplicitRoleArnUsingRoleAssumption(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+	// test scaling using correct identity provided via podIdentity.RoleArn
+	// for a role to be used with web identity (workload-2 role allows it)
+ testScaleWithExplicitRoleArnUsingWebIdentityRole(t, kc, data, sqsClient, queueWorkload2.QueueUrl)
+ // test scaling using correct identity provided via workload
+ testScaleWithWorkloadArn(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ cleanupQueue(t, sqsClient, queueWorkload1.QueueUrl)
+ cleanupQueue(t, sqsClient, queueWorkload2.QueueUrl)
+}
+
+// testScaleWithKEDAIdentity checks that we don't scale out because KEDA identity
+// doesn't have access to the queue, so even though there are messages, the workload
+// won't scale
+func testScaleWithKEDAIdentity(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with KEDA role ---")
+ data.ScaledObjectName = "scale-with-keda-identity"
+ data.TriggerAuthenticationName = "scale-with-keda-identity"
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ KubectlApplyWithTemplate(t, data, "triggerAuthTemplate", triggerAuthenticationTemplate)
+ addMessages(t, sqsClient, queueURL, sqsMessageCount)
+ // replicas shouldn't change
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+ testScaleIn(t, kc, sqsClient, queueURL)
+ KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ KubectlDeleteWithTemplate(t, data, "triggerAuthTemplate", triggerAuthenticationTemplate)
+}
+
+func testScaleWithExplicitRoleArnUsingRoleAssumption(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with explicit role arn using role assumption ---")
+	data.ScaledObjectName = "scale-using-role-assumption"
+	data.TriggerAuthenticationName = "scale-using-role-assumption"
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ addMessages(t, sqsClient, queueURL, sqsMessageCount)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+ testScaleIn(t, kc, sqsClient, queueURL)
+ KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+}
+
+func testScaleWithExplicitRoleArnUsingWebIdentityRole(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with explicit role arn using web identity role ---")
+ data.RoleArn = awsWorkload2RoleArn
+ data.SqsQueue = *queueURL
+ data.ScaledObjectName = "scale-using-web-identity"
+ data.TriggerAuthenticationName = "scale-using-web-identity"
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ addMessages(t, sqsClient, queueURL, sqsMessageCount)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+ testScaleIn(t, kc, sqsClient, queueURL)
+ KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+}
+
+func testScaleWithWorkloadArn(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with workload arn role ---")
+ data.ScaledObjectName = "scale-using-workload-arn"
+ data.TriggerAuthenticationName = "scale-using-workload-arn"
+ KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithIdentityOwner", triggerAuthTemplateWithIdentityOwner)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ addMessages(t, sqsClient, queueURL, sqsMessageCount)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+ testScaleIn(t, kc, sqsClient, queueURL)
+ KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+ KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithIdentityOwner", triggerAuthTemplateWithIdentityOwner)
+}
+
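+// testScaleIn drains the queue by receiving and deleting the messages sent
+// earlier, then waits for the deployment to scale back to zero.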
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scale in ---")
+	totalDeletedMessages := 0
+
+	// Drain the queue; bound the loop so the test fails instead of hanging if
+	// the expected number of messages never becomes visible.
+	for i := 0; i < 180 && totalDeletedMessages < sqsMessageCount; i++ {
+		response, _ := sqsClient.ReceiveMessage(context.Background(), &sqs.ReceiveMessageInput{
+			QueueUrl:            queueURL,
+			MaxNumberOfMessages: int32(sqsMessageCount),
+		})
+		if response != nil {
+			for _, message := range response.Messages {
+				_, err := sqsClient.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
+					QueueUrl:      queueURL,
+					ReceiptHandle: message.ReceiptHandle,
+				})
+				assert.NoErrorf(t, err, "cannot delete message - %s", err)
+				totalDeletedMessages++
+			}
+		}
+		time.Sleep(time.Second)
+	}
+	assert.Equal(t, sqsMessageCount, totalDeletedMessages, "expected to drain all messages from the queue")
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 1),
+ "replica count should be 0 after 3 minutes")
+}
+
+func addMessages(t *testing.T, sqsClient *sqs.Client, queueURL *string, messages int) {
+ for i := 0; i < messages; i++ {
+ msg := fmt.Sprintf("Message - %d", i)
+ _, err := sqsClient.SendMessage(context.Background(), &sqs.SendMessageInput{
+ QueueUrl: queueURL,
+ MessageBody: aws.String(msg),
+ DelaySeconds: 10,
+ })
+ assert.NoErrorf(t, err, "cannot send message - %s", err)
+ }
+}
+
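+// createSqsQueue creates a queue with a 60-second default delivery delay and
+// one-day message retention.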
+func createSqsQueue(t *testing.T, queueName string, sqsClient *sqs.Client) *sqs.CreateQueueOutput {
+ queue, err := sqsClient.CreateQueue(context.Background(), &sqs.CreateQueueInput{
+ QueueName: &queueName,
+ Attributes: map[string]string{
+ "DelaySeconds": "60",
+ "MessageRetentionPeriod": "86400",
+ }})
+ assert.NoErrorf(t, err, "failed to create queue - %s", err)
+ return queue
+}
+
+func cleanupQueue(t *testing.T, sqsClient *sqs.Client, queueURL *string) {
+ t.Log("--- cleaning up ---")
+ _, err := sqsClient.DeleteQueue(context.Background(), &sqs.DeleteQueueInput{
+ QueueUrl: queueURL,
+ })
+ assert.NoErrorf(t, err, "cannot delete queue - %s", err)
+}
+
+func createSqsClient() *sqs.Client {
+ configOptions := make([]func(*config.LoadOptions) error, 0)
+ configOptions = append(configOptions, config.WithRegion(awsRegion))
+ cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...)
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "")
+ return sqs.NewFromConfig(cfg)
+}
+
+func getTemplateData(sqsQueue string) (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ ScaledObjectName: scaledObjectName,
+ SecretName: secretName,
+ AwsRegion: awsRegion,
+ RoleArn: awsWorkload1RoleArn,
+ SqsQueue: sqsQueue,
+ }, []Template{
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ {Name: "serviceAccountTemplate", Config: serviceAccountTemplate},
+ }
+}
diff --git a/vendor/github.com/valyala/fasthttp/coarseTime.go b/vendor/github.com/valyala/fasthttp/coarsetime.go
similarity index 100%
rename from vendor/github.com/valyala/fasthttp/coarseTime.go
rename to vendor/github.com/valyala/fasthttp/coarsetime.go