diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index a9b65e4c33e..7c0d80b9e51 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -254,7 +254,7 @@ jobs: details_url: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} - name: Upload test logs - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: e2e-test-logs path: "${{ github.workspace }}/**/*.log" diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4928cd0f565..b1afd55464c 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -138,7 +138,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.x - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml index 2196cceafc2..b8c993e5854 100644 --- a/.github/workflows/template-main-e2e-test.yml +++ b/.github/workflows/template-main-e2e-test.yml @@ -47,7 +47,7 @@ jobs: NODE_POOL_SIZE: 1 - name: Upload test logs - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 if: ${{ always() }} with: name: e2e-test-logs diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml index b4998101bfd..fc3a39f112b 100644 --- a/.github/workflows/template-smoke-tests.yml +++ b/.github/workflows/template-smoke-tests.yml @@ -44,7 +44,7 @@ jobs: run: make smoke-test - name: Upload test logs - uses: 
actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 if: ${{ always() }} with: name: smoke-test-logs ${{ inputs.runs-on }}-${{ inputs.kubernetesVersion }} diff --git a/BUILD.md b/BUILD.md index fc80fe51ec1..560083ef37f 100644 --- a/BUILD.md +++ b/BUILD.md @@ -5,7 +5,7 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Building](#building) - - [Quick start with Visual Studio Code Remote - Containers](#quick-start-with-visual-studio-code-remote---containers) + - [Quick start with Visual Studio Code Dev Containers](#quick-start-with-visual-studio-code-dev-containers) - [Locally directly](#locally-directly) - [Deploying](#deploying) - [Custom KEDA locally outside cluster](#custom-keda-locally-outside-cluster) @@ -25,14 +25,14 @@ ## Building -### Quick start with [Visual Studio Code Remote - Containers](https://code.visualstudio.com/docs/remote/containers) +### Quick start with [Visual Studio Code Dev Containers](https://code.visualstudio.com/docs/remote/containers) This helps you pull and build quickly - dev containers launch the project inside a container with all the tooling required for a consistent and seamless developer experience. This means you don't have to install and configure your dev environment as the container handles this for you. -To get started install [VSCode](https://code.visualstudio.com/) and the [Remote Containers extensions]( +To get started install [VSCode](https://code.visualstudio.com/) and the [Dev Containers extensions]( https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) Clone the repo and launch code: @@ -43,7 +43,7 @@ cd keda code . 
``` -Once VSCode launches run `CTRL+SHIFT+P -> Remote-Containers: Reopen in container` and then use the integrated +Once VSCode launches run `CTRL+SHIFT+P -> Dev Containers: Reopen in container` and then use the integrated terminal to run: ```bash diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f86988e080..ae977f01872 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,7 +58,8 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New - **CloudEventSource**: Introduce ClusterCloudEventSource ([#3533](https://github.com/kedacore/keda/issues/3533)) -- **CloudEventSource**: Provide CloudEvents around the management of ScaledJobs resources ([#3523](https://github.com/kedacore/keda/issues/3523)) +- **CloudEventSource**: Provide ClusterCloudEventSource around the management of ScaledJobs resources ([#3523](https://github.com/kedacore/keda/issues/3523)) +- **CloudEventSource**: Provide ClusterCloudEventSource around the management of TriggerAuthentication/ClusterTriggerAuthentication resources ([#3524](https://github.com/kedacore/keda/issues/3524)) #### Experimental @@ -91,7 +92,7 @@ New deprecation(s): ### Breaking Changes -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **Pulsar Scaler**: remove `msgBacklog` trigger name ([#6059](https://github.com/kedacore/keda/issues/6059)) ### Other diff --git a/apis/eventing/v1alpha1/cloudevent_types.go b/apis/eventing/v1alpha1/cloudevent_types.go index c3cfaecad89..c0abd97dd15 100644 --- a/apis/eventing/v1alpha1/cloudevent_types.go +++ b/apis/eventing/v1alpha1/cloudevent_types.go @@ -17,7 +17,8 @@ limitations under the License. 
package v1alpha1 // CloudEventType contains the list of cloudevent types -// +kubebuilder:validation:Enum=keda.scaledobject.ready.v1;keda.scaledobject.failed.v1;keda.scaledobject.removed.v1;keda.scaledjob.ready.v1;keda.scaledjob.failed.v1;keda.scaledjob.removed.v1 +// +kubebuilder:validation:Enum=keda.scaledobject.ready.v1;keda.scaledobject.failed.v1;keda.scaledobject.removed.v1;keda.scaledjob.ready.v1;keda.scaledjob.failed.v1;keda.scaledjob.removed.v1;keda.authentication.triggerauthentication.created.v1;keda.authentication.triggerauthentication.updated.v1;keda.authentication.triggerauthentication.removed.v1;keda.authentication.clustertriggerauthentication.created.v1;keda.authentication.clustertriggerauthentication.updated.v1;keda.authentication.clustertriggerauthentication.removed.v1 + type CloudEventType string const ( @@ -38,6 +39,24 @@ const ( // ScaledJobRemovedType is for event when removed ScaledJob ScaledJobRemovedType CloudEventType = "keda.scaledjob.removed.v1" + + // TriggerAuthenticationCreatedType is for event when a new TriggerAuthentication is created + TriggerAuthenticationCreatedType CloudEventType = "keda.authentication.triggerauthentication.created.v1" + + // TriggerAuthenticationUpdatedType is for event when a TriggerAuthentication is updated + TriggerAuthenticationUpdatedType CloudEventType = "keda.authentication.triggerauthentication.updated.v1" + + // TriggerAuthenticationRemovedType is for event when a TriggerAuthentication is deleted + TriggerAuthenticationRemovedType CloudEventType = "keda.authentication.triggerauthentication.removed.v1" + + // ClusterTriggerAuthenticationCreatedType is for event when a new ClusterTriggerAuthentication is created + ClusterTriggerAuthenticationCreatedType CloudEventType = "keda.authentication.clustertriggerauthentication.created.v1" + + // ClusterTriggerAuthenticationUpdatedType is for event when a ClusterTriggerAuthentication is updated + ClusterTriggerAuthenticationUpdatedType CloudEventType = 
"keda.authentication.clustertriggerauthentication.updated.v1" + + // ClusterTriggerAuthenticationRemovedType is for event when a ClusterTriggerAuthentication is deleted + ClusterTriggerAuthenticationRemovedType CloudEventType = "keda.authentication.clustertriggerauthentication.removed.v1" ) var AllEventTypes = []CloudEventType{ diff --git a/cmd/operator/main.go b/cmd/operator/main.go index aa81dc79fce..16f6899d230 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -246,15 +246,15 @@ func main() { os.Exit(1) } if err = (&kedacontrollers.TriggerAuthenticationReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, + Client: mgr.GetClient(), + EventHandler: eventEmitter, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "TriggerAuthentication") os.Exit(1) } if err = (&kedacontrollers.ClusterTriggerAuthenticationReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, + Client: mgr.GetClient(), + EventHandler: eventEmitter, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterTriggerAuthentication") os.Exit(1) diff --git a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml index d121df22074..53e8bcfe504 100644 --- a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml @@ -83,8 +83,6 @@ spec: properties: excludedEventTypes: items: - description: CloudEventType contains the list of cloudevent - types enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 @@ -92,12 +90,16 @@ spec: - keda.scaledjob.ready.v1 - keda.scaledjob.failed.v1 - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - 
keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 type: string type: array includedEventTypes: items: - description: CloudEventType contains the list of cloudevent - types enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 @@ -105,6 +107,12 @@ spec: - keda.scaledjob.ready.v1 - keda.scaledjob.failed.v1 - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 type: string type: array type: object diff --git a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml index 8c81de1f594..b5d07384483 100644 --- a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml @@ -81,8 +81,6 @@ spec: properties: excludedEventTypes: items: - description: CloudEventType contains the list of cloudevent - types enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 @@ -90,12 +88,16 @@ spec: - keda.scaledjob.ready.v1 - keda.scaledjob.failed.v1 - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 type: string type: array includedEventTypes: items: - description: CloudEventType contains the list of cloudevent - types 
enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 @@ -103,6 +105,12 @@ spec: - keda.scaledjob.ready.v1 - keda.scaledjob.failed.v1 - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 type: string type: array type: object diff --git a/controllers/keda/clustertriggerauthentication_controller.go b/controllers/keda/clustertriggerauthentication_controller.go index ceb7ae6185c..aabab91c4c3 100644 --- a/controllers/keda/clustertriggerauthentication_controller.go +++ b/controllers/keda/clustertriggerauthentication_controller.go @@ -18,18 +18,21 @@ package keda import ( "context" + "fmt" "sync" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/common/message" + "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/metricscollector" ) @@ -37,7 +40,7 @@ import ( // ClusterTriggerAuthenticationReconciler reconciles a ClusterTriggerAuthentication object type ClusterTriggerAuthenticationReconciler struct { client.Client - record.EventRecorder + eventemitter.EventHandler } type clusterTriggerAuthMetricsData struct { @@ -80,8 +83,12 @@ func (r *ClusterTriggerAuthenticationReconciler) Reconcile(ctx context.Context, 
r.updatePromMetrics(clusterTriggerAuthentication, req.NamespacedName.String()) if clusterTriggerAuthentication.ObjectMeta.Generation == 1 { - r.EventRecorder.Event(clusterTriggerAuthentication, corev1.EventTypeNormal, eventreason.ClusterTriggerAuthenticationAdded, "New ClusterTriggerAuthentication configured") + r.Emit(clusterTriggerAuthentication, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.ClusterTriggerAuthenticationCreatedType, eventreason.ClusterTriggerAuthenticationAdded, message.ClusterTriggerAuthenticationCreatedMsg) + } else { + msg := fmt.Sprintf(message.ClusterTriggerAuthenticationUpdatedMsg, clusterTriggerAuthentication.Name) + r.Emit(clusterTriggerAuthentication, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.ClusterTriggerAuthenticationUpdatedType, eventreason.ClusterTriggerAuthenticationUpdated, msg) } + return ctrl.Result{}, nil } diff --git a/controllers/keda/triggerauthentication_controller.go b/controllers/keda/triggerauthentication_controller.go index edca7ea8efc..b5ab9e1bd82 100755 --- a/controllers/keda/triggerauthentication_controller.go +++ b/controllers/keda/triggerauthentication_controller.go @@ -18,18 +18,21 @@ package keda import ( "context" + "fmt" "sync" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/common/message" + "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/util" @@ -38,7 +41,7 @@ import ( // TriggerAuthenticationReconciler 
reconciles a TriggerAuthentication object type TriggerAuthenticationReconciler struct { client.Client - record.EventRecorder + eventemitter.EventHandler } type triggerAuthMetricsData struct { @@ -81,7 +84,10 @@ func (r *TriggerAuthenticationReconciler) Reconcile(ctx context.Context, req ctr r.updatePromMetrics(triggerAuthentication, req.NamespacedName.String()) if triggerAuthentication.ObjectMeta.Generation == 1 { - r.EventRecorder.Event(triggerAuthentication, corev1.EventTypeNormal, eventreason.TriggerAuthenticationAdded, "New TriggerAuthentication configured") + r.Emit(triggerAuthentication, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.TriggerAuthenticationCreatedType, eventreason.TriggerAuthenticationAdded, message.TriggerAuthenticationCreatedMsg) + } else { + msg := fmt.Sprintf(message.TriggerAuthenticationUpdatedMsg, triggerAuthentication.Name) + r.Emit(triggerAuthentication, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.TriggerAuthenticationUpdatedType, eventreason.TriggerAuthenticationUpdated, msg) } return ctrl.Result{}, nil diff --git a/controllers/keda/util/finalizer.go b/controllers/keda/util/finalizer.go index c47343822e0..eb9d31db0a9 100644 --- a/controllers/keda/util/finalizer.go +++ b/controllers/keda/util/finalizer.go @@ -6,10 +6,11 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/eventreason" ) @@ -19,7 +20,7 @@ const ( type authenticationReconciler interface { client.Client - record.EventRecorder + eventemitter.EventHandler UpdatePromMetricsOnDelete(string) } @@ -48,13 +49,16 @@ func EnsureAuthenticationResourceFinalizer(ctx context.Context, logger logr.Logg func 
FinalizeAuthenticationResource(ctx context.Context, logger logr.Logger, reconciler authenticationReconciler, authResource client.Object, namespacedName string) error { var authResourceType, reason string + var cloudEventType eventingv1alpha1.CloudEventType switch authResource.(type) { case *kedav1alpha1.TriggerAuthentication: authResourceType = "TriggerAuthentication" reason = eventreason.TriggerAuthenticationDeleted + cloudEventType = eventingv1alpha1.TriggerAuthenticationRemovedType case *kedav1alpha1.ClusterTriggerAuthentication: authResourceType = "ClusterTriggerAuthentication" reason = eventreason.ClusterTriggerAuthenticationDeleted + cloudEventType = eventingv1alpha1.ClusterTriggerAuthenticationRemovedType } if Contains(authResource.GetFinalizers(), authenticationFinalizer) { @@ -68,6 +72,6 @@ func FinalizeAuthenticationResource(ctx context.Context, logger logr.Logger, rec } logger.Info(fmt.Sprintf("Successfully finalized %s", authResourceType)) - reconciler.Event(authResource, corev1.EventTypeNormal, reason, fmt.Sprintf("%s was deleted", authResourceType)) + reconciler.Emit(authResource, namespacedName, corev1.EventTypeNormal, cloudEventType, reason, fmt.Sprintf("%s was deleted", authResourceType)) return nil } diff --git a/pkg/common/message/message.go b/pkg/common/message/message.go index b4a6458f3e2..4abcc0402ff 100644 --- a/pkg/common/message/message.go +++ b/pkg/common/message/message.go @@ -34,4 +34,12 @@ const ( ScaledJobReadyMsg = "ScaledJob is ready for scaling" ScaledJobRemoved = "ScaledJob was deleted" + + TriggerAuthenticationCreatedMsg = "New TriggerAuthentication configured" + + TriggerAuthenticationUpdatedMsg = "TriggerAuthentication %s is updated" + + ClusterTriggerAuthenticationCreatedMsg = "New ClusterTriggerAuthentication configured" + + ClusterTriggerAuthenticationUpdatedMsg = "ClusterTriggerAuthentication %s is updated" ) diff --git a/pkg/eventreason/eventreason.go b/pkg/eventreason/eventreason.go index 6fbc854ddc6..dd41cb6639a 
100644 --- a/pkg/eventreason/eventreason.go +++ b/pkg/eventreason/eventreason.go @@ -77,6 +77,9 @@ const ( // TriggerAuthenticationFailed is for event when a TriggerAuthentication occurs error TriggerAuthenticationFailed = "TriggerAuthenticationFailed" + + // TriggerAuthenticationUpdated is for event when a TriggerAuthentication is updated + TriggerAuthenticationUpdated = "TriggerAuthenticationUpdated" + + // ClusterTriggerAuthenticationDeleted is for event when a ClusterTriggerAuthentication is deleted ClusterTriggerAuthenticationDeleted = "ClusterTriggerAuthenticationDeleted" @@ -85,4 +88,7 @@ const ( // ClusterTriggerAuthenticationFailed is for event when a ClusterTriggerAuthentication occurs error ClusterTriggerAuthenticationFailed = "ClusterTriggerAuthenticationFailed" + + // ClusterTriggerAuthenticationUpdated is for event when a ClusterTriggerAuthentication is updated + ClusterTriggerAuthenticationUpdated = "ClusterTriggerAuthenticationUpdated" ) diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go index 72aff9beb46..f3c9bcac1f6 100644 --- a/pkg/scalers/aws_dynamodb_scaler.go +++ b/pkg/scalers/aws_dynamodb_scaler.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "strconv" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" @@ -28,18 +27,18 @@ type awsDynamoDBScaler struct { } type awsDynamoDBMetadata struct { - tableName string - awsRegion string - awsEndpoint string - keyConditionExpression string - expressionAttributeNames map[string]string - expressionAttributeValues map[string]types.AttributeValue - indexName string - targetValue int64 - activationTargetValue int64 awsAuthorization awsutils.AuthorizationMetadata + expressionAttributeValues map[string]types.AttributeValue + expressionAttributeNames map[string]string triggerIndex int metricName string + TableName string `keda:"name=tableName, order=triggerMetadata"` + AwsRegion string `keda:"name=awsRegion, order=triggerMetadata"` 
+ AwsEndpoint string `keda:"name=awsEndpoint, order=triggerMetadata, optional"` + KeyConditionExpression string `keda:"name=keyConditionExpression, order=triggerMetadata"` + IndexName string `keda:"name=indexName, order=triggerMetadata, optional"` + TargetValue int64 `keda:"name=targetValue, order=triggerMetadata, optional, default=-1"` + ActivationTargetValue int64 `keda:"name=activationTargetValue, order=triggerMetadata, default=0"` } func NewAwsDynamoDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { @@ -65,63 +64,24 @@ func NewAwsDynamoDBScaler(ctx context.Context, config *scalersconfig.ScalerConfi } var ( - // ErrAwsDynamoNoTableName is returned when "tableName" is missing from the config. - ErrAwsDynamoNoTableName = errors.New("no tableName given") - - // ErrAwsDynamoNoAwsRegion is returned when "awsRegion" is missing from the config. - ErrAwsDynamoNoAwsRegion = errors.New("no awsRegion given") - - // ErrAwsDynamoNoKeyConditionExpression is returned when "keyConditionExpression" is missing from the config. - ErrAwsDynamoNoKeyConditionExpression = errors.New("no keyConditionExpression given") - - // ErrAwsDynamoEmptyExpressionAttributeNames is returned when "expressionAttributeNames" is empty. - ErrAwsDynamoEmptyExpressionAttributeNames = errors.New("empty map") - - // ErrAwsDynamoInvalidExpressionAttributeNames is returned when "expressionAttributeNames" is an invalid JSON. - ErrAwsDynamoInvalidExpressionAttributeNames = errors.New("invalid expressionAttributeNames") - - // ErrAwsDynamoNoExpressionAttributeNames is returned when "expressionAttributeNames" is missing from the config. - ErrAwsDynamoNoExpressionAttributeNames = errors.New("no expressionAttributeNames given") - + ErrAwsDynamoNoTargetValue = errors.New("no targetValue given") // ErrAwsDynamoInvalidExpressionAttributeValues is returned when "expressionAttributeNames" is missing an invalid JSON. 
ErrAwsDynamoInvalidExpressionAttributeValues = errors.New("invalid expressionAttributeValues") - // ErrAwsDynamoNoExpressionAttributeValues is returned when "expressionAttributeValues" is missing from the config. ErrAwsDynamoNoExpressionAttributeValues = errors.New("no expressionAttributeValues given") - - // ErrAwsDynamoNoTargetValue is returned when "targetValue" is missing from the config. - ErrAwsDynamoNoTargetValue = errors.New("no targetValue given") + // ErrAwsDynamoInvalidExpressionAttributeNames is returned when "expressionAttributeNames" is an invalid JSON. + ErrAwsDynamoInvalidExpressionAttributeNames = errors.New("invalid expressionAttributeNames") + // ErrAwsDynamoEmptyExpressionAttributeNames is returned when "expressionAttributeNames" is empty. + ErrAwsDynamoEmptyExpressionAttributeNames = errors.New("empty map") + // ErrAwsDynamoNoExpressionAttributeNames is returned when "expressionAttributeNames" is missing from the config. + ErrAwsDynamoNoExpressionAttributeNames = errors.New("no expressionAttributeNames given") ) func parseAwsDynamoDBMetadata(config *scalersconfig.ScalerConfig) (*awsDynamoDBMetadata, error) { - meta := awsDynamoDBMetadata{} - - if val, ok := config.TriggerMetadata["tableName"]; ok && val != "" { - meta.tableName = val - } else { - return nil, ErrAwsDynamoNoTableName - } - - if val, ok := config.TriggerMetadata["awsRegion"]; ok && val != "" { - meta.awsRegion = val - } else { - return nil, ErrAwsDynamoNoAwsRegion - } - - if val, ok := config.TriggerMetadata["awsEndpoint"]; ok { - meta.awsEndpoint = val - } - - if val, ok := config.TriggerMetadata["indexName"]; ok { - meta.indexName = val - } - - if val, ok := config.TriggerMetadata["keyConditionExpression"]; ok && val != "" { - meta.keyConditionExpression = val - } else { - return nil, ErrAwsDynamoNoKeyConditionExpression + meta := &awsDynamoDBMetadata{} + if err := config.TypedConfig(meta); err != nil { + return nil, fmt.Errorf("error parsing DynamoDb metadata: %w", err) } - if 
val, ok := config.TriggerMetadata["expressionAttributeNames"]; ok && val != "" { names, err := json2Map(val) @@ -133,7 +93,6 @@ func parseAwsDynamoDBMetadata(config *scalersconfig.ScalerConfig) (*awsDynamoDBM } else { return nil, ErrAwsDynamoNoExpressionAttributeNames } - if val, ok := config.TriggerMetadata["expressionAttributeValues"]; ok && val != "" { values, err := json2DynamoMap(val) @@ -145,31 +104,10 @@ func parseAwsDynamoDBMetadata(config *scalersconfig.ScalerConfig) (*awsDynamoDBM } else { return nil, ErrAwsDynamoNoExpressionAttributeValues } - - if val, ok := config.TriggerMetadata["targetValue"]; ok && val != "" { - n, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("error parsing metadata targetValue: %w", err) - } - - meta.targetValue = n - } else { - if config.AsMetricSource { - meta.targetValue = 0 - } else { - return nil, ErrAwsDynamoNoTargetValue - } - } - - if val, ok := config.TriggerMetadata["activationTargetValue"]; ok && val != "" { - n, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("error parsing metadata activationTargetValue: %w", err) - } - - meta.activationTargetValue = n - } else { - meta.activationTargetValue = 0 + if meta.TargetValue == -1 && config.AsMetricSource { + meta.TargetValue = 0 + } else if meta.TargetValue == -1 && !config.AsMetricSource { + return nil, ErrAwsDynamoNoTargetValue } auth, err := awsutils.GetAwsAuthorization(config.TriggerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) @@ -181,20 +119,20 @@ func parseAwsDynamoDBMetadata(config *scalersconfig.ScalerConfig) (*awsDynamoDBM meta.triggerIndex = config.TriggerIndex meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, - kedautil.NormalizeString(fmt.Sprintf("aws-dynamodb-%s", meta.tableName))) + kedautil.NormalizeString(fmt.Sprintf("aws-dynamodb-%s", meta.TableName))) - return &meta, nil + return meta, nil } func createDynamoDBClient(ctx 
context.Context, metadata *awsDynamoDBMetadata) (*dynamodb.Client, error) { - cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.AwsRegion, metadata.awsAuthorization) if err != nil { return nil, err } return dynamodb.NewFromConfig(*cfg, func(options *dynamodb.Options) { - if metadata.awsEndpoint != "" { - options.BaseEndpoint = aws.String(metadata.awsEndpoint) + if metadata.AwsEndpoint != "" { + options.BaseEndpoint = aws.String(metadata.AwsEndpoint) } }), nil } @@ -208,7 +146,7 @@ func (s *awsDynamoDBScaler) GetMetricsAndActivity(ctx context.Context, metricNam metric := GenerateMetricInMili(metricName, metricValue) - return []external_metrics.ExternalMetricValue{metric}, metricValue > float64(s.metadata.activationTargetValue), nil + return []external_metrics.ExternalMetricValue{metric}, metricValue > float64(s.metadata.ActivationTargetValue), nil } func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -216,7 +154,7 @@ func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.Metric Metric: v2.MetricIdentifier{ Name: s.metadata.metricName, }, - Target: GetMetricTarget(s.metricType, s.metadata.targetValue), + Target: GetMetricTarget(s.metricType, s.metadata.TargetValue), } metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} @@ -232,14 +170,14 @@ func (s *awsDynamoDBScaler) Close(context.Context) error { func (s *awsDynamoDBScaler) GetQueryMetrics(ctx context.Context) (float64, error) { dimensions := dynamodb.QueryInput{ - TableName: aws.String(s.metadata.tableName), - KeyConditionExpression: aws.String(s.metadata.keyConditionExpression), + TableName: aws.String(s.metadata.TableName), + KeyConditionExpression: aws.String(s.metadata.KeyConditionExpression), ExpressionAttributeNames: s.metadata.expressionAttributeNames, ExpressionAttributeValues: s.metadata.expressionAttributeValues, } - if 
s.metadata.indexName != "" { - dimensions.IndexName = aws.String(s.metadata.indexName) + if s.metadata.IndexName != "" { + dimensions.IndexName = aws.String(s.metadata.IndexName) } res, err := s.dbClient.Query(ctx, &dimensions) diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go index a577f561e21..f6ec68314e8 100644 --- a/pkg/scalers/aws_dynamodb_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_scaler_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "strconv" "testing" "github.com/aws/aws-sdk-go-v2/service/dynamodb" @@ -38,6 +37,17 @@ type parseDynamoDBMetadataTestData struct { expectedError error } +var ( + // ErrAwsDynamoNoTableName is returned when "tableName" is missing from the config. + ErrAwsDynamoNoTableName = errors.New("missing required parameter \"tableName\"") + + // ErrAwsDynamoNoAwsRegion is returned when "awsRegion" is missing from the config. + ErrAwsDynamoNoAwsRegion = errors.New("missing required parameter \"awsRegion\"") + + // ErrAwsDynamoNoKeyConditionExpression is returned when "keyConditionExpression" is missing from the config. 
+ ErrAwsDynamoNoKeyConditionExpression = errors.New("missing required parameter \"keyConditionExpression\"") +) + var dynamoTestCases = []parseDynamoDBMetadataTestData{ { name: "no tableName given", @@ -104,7 +114,7 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ "targetValue": "no-valid", }, authParams: map[string]string{}, - expectedError: strconv.ErrSyntax, + expectedError: errors.New("error parsing DynamoDb metadata: unable to set param \"targetValue\" value"), }, { name: "invalid activationTargetValue given", @@ -118,7 +128,7 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ "activationTargetValue": "no-valid", }, authParams: map[string]string{}, - expectedError: strconv.ErrSyntax, + expectedError: errors.New("unable to set param \"activationTargetValue\""), }, { name: "malformed expressionAttributeNames", @@ -185,12 +195,12 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ authParams: testAWSDynamoAuthentication, expectedError: nil, expectedMetadata: &awsDynamoDBMetadata{ - tableName: "test", - awsRegion: "eu-west-1", - keyConditionExpression: "#yr = :yyyy", + TableName: "test", + AwsRegion: "eu-west-1", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, triggerIndex: 1, metricName: "s1-aws-dynamodb-test", awsAuthorization: awsutils.AuthorizationMetadata{ @@ -214,13 +224,13 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ authParams: testAWSDynamoAuthentication, expectedError: nil, expectedMetadata: &awsDynamoDBMetadata{ - tableName: "test", - awsRegion: "eu-west-1", - awsEndpoint: "http://localhost:4566", - keyConditionExpression: "#yr = :yyyy", + TableName: "test", + AwsRegion: "eu-west-1", + AwsEndpoint: "http://localhost:4566", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: 
map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, triggerIndex: 1, metricName: "s1-aws-dynamodb-test", awsAuthorization: awsutils.AuthorizationMetadata{ @@ -244,13 +254,13 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ authParams: testAWSDynamoAuthentication, expectedError: nil, expectedMetadata: &awsDynamoDBMetadata{ - tableName: "test", - awsRegion: "eu-west-1", - keyConditionExpression: "#yr = :yyyy", + TableName: "test", + AwsRegion: "eu-west-1", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - activationTargetValue: 1, - targetValue: 3, + ActivationTargetValue: 1, + TargetValue: 3, triggerIndex: 1, metricName: "s1-aws-dynamodb-test", awsAuthorization: awsutils.AuthorizationMetadata{ @@ -274,13 +284,13 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ authParams: testAWSDynamoAuthentication, expectedError: nil, expectedMetadata: &awsDynamoDBMetadata{ - tableName: "test", - awsRegion: "eu-west-1", - indexName: "test-index", - keyConditionExpression: "#yr = :yyyy", + TableName: "test", + AwsRegion: "eu-west-1", + IndexName: "test-index", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, triggerIndex: 1, metricName: "s1-aws-dynamodb-test", awsAuthorization: awsutils.AuthorizationMetadata{ @@ -346,48 +356,48 @@ var yearAttr = &types.AttributeValueMemberN{Value: target} var awsDynamoDBGetMetricTestData = []awsDynamoDBMetadata{ { - tableName: "ValidTable", - awsRegion: "eu-west-1", - keyConditionExpression: "#yr = :yyyy", + TableName: "ValidTable", + AwsRegion: "eu-west-1", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: 
map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, }, { - tableName: testAWSDynamoErrorTable, - awsRegion: "eu-west-1", - keyConditionExpression: "#yr = :yyyy", + TableName: testAWSDynamoErrorTable, + AwsRegion: "eu-west-1", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, }, { - tableName: testAWSDynamoNoValueTable, - awsRegion: "eu-west-1", - keyConditionExpression: "#yr = :yyyy", + TableName: testAWSDynamoNoValueTable, + AwsRegion: "eu-west-1", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - targetValue: 3, + TargetValue: 3, }, { - tableName: testAWSDynamoIndexTable, - awsRegion: "eu-west-1", - indexName: "test-index", - keyConditionExpression: "#yr = :yyyy", + TableName: testAWSDynamoIndexTable, + AwsRegion: "eu-west-1", + IndexName: "test-index", + KeyConditionExpression: "#yr = :yyyy", expressionAttributeNames: map[string]string{"#yr": year}, expressionAttributeValues: map[string]types.AttributeValue{":yyyy": yearAttr}, - activationTargetValue: 3, - targetValue: 3, + ActivationTargetValue: 3, + TargetValue: 3, }, } func TestDynamoGetMetrics(t *testing.T) { for _, meta := range awsDynamoDBGetMetricTestData { - t.Run(meta.tableName, func(t *testing.T) { + t.Run(meta.TableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} value, _, err := scaler.GetMetricsAndActivity(context.Background(), "aws-dynamodb") - switch meta.tableName { + switch meta.TableName { case testAWSDynamoErrorTable: assert.EqualError(t, err, "error", "expect error because of dynamodb api error") case testAWSDynamoNoValueTable: @@ -403,11 +413,11 @@ func TestDynamoGetMetrics(t *testing.T) { func 
TestDynamoGetQueryMetrics(t *testing.T) { for _, meta := range awsDynamoDBGetMetricTestData { - t.Run(meta.tableName, func(t *testing.T) { + t.Run(meta.TableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} value, err := scaler.GetQueryMetrics(context.Background()) - switch meta.tableName { + switch meta.TableName { case testAWSDynamoErrorTable: assert.EqualError(t, err, "error", "expect error because of dynamodb api error") case testAWSDynamoNoValueTable: @@ -423,11 +433,11 @@ func TestDynamoGetQueryMetrics(t *testing.T) { func TestDynamoIsActive(t *testing.T) { for _, meta := range awsDynamoDBGetMetricTestData { - t.Run(meta.tableName, func(t *testing.T) { + t.Run(meta.TableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} _, value, err := scaler.GetMetricsAndActivity(context.Background(), "aws-dynamodb") - switch meta.tableName { + switch meta.TableName { case testAWSDynamoErrorTable: assert.EqualError(t, err, "error", "expect error because of dynamodb api error") case testAWSDynamoNoValueTable: diff --git a/pkg/scalers/cron_scaler.go b/pkg/scalers/cron_scaler.go index 782082c0098..bd9c5a22a7d 100644 --- a/pkg/scalers/cron_scaler.go +++ b/pkg/scalers/cron_scaler.go @@ -3,7 +3,6 @@ package scalers import ( "context" "fmt" - "strconv" "strings" "time" @@ -23,16 +22,47 @@ const ( type cronScaler struct { metricType v2.MetricTargetType - metadata *cronMetadata + metadata cronMetadata logger logr.Logger } type cronMetadata struct { - start string - end string - timezone string - desiredReplicas int64 - triggerIndex int + Start string `keda:"name=start, order=triggerMetadata"` + End string `keda:"name=end, order=triggerMetadata"` + Timezone string `keda:"name=timezone, order=triggerMetadata"` + DesiredReplicas int64 `keda:"name=desiredReplicas, order=triggerMetadata"` + TriggerIndex int +} + +func (m *cronMetadata) Validate() error { + if m.Timezone == "" { + return 
fmt.Errorf("no timezone specified") + } + + parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + if m.Start == "" { + return fmt.Errorf("no start schedule specified") + } + if _, err := parser.Parse(m.Start); err != nil { + return fmt.Errorf("error parsing start schedule: %w", err) + } + + if m.End == "" { + return fmt.Errorf("no end schedule specified") + } + if _, err := parser.Parse(m.End); err != nil { + return fmt.Errorf("error parsing end schedule: %w", err) + } + + if m.Start == m.End { + return fmt.Errorf("start and end can not have exactly same time input") + } + + if m.DesiredReplicas == 0 { + return fmt.Errorf("no desiredReplicas specified") + } + + return nil } // NewCronScaler creates a new cronScaler @@ -42,9 +72,9 @@ func NewCronScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - meta, parseErr := parseCronMetadata(config) - if parseErr != nil { - return nil, fmt.Errorf("error parsing cron metadata: %w", parseErr) + meta, err := parseCronMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing cron metadata: %w", err) } return &cronScaler{ @@ -68,51 +98,12 @@ func getCronTime(location *time.Location, spec string) (int64, error) { return cronTime, nil } -func parseCronMetadata(config *scalersconfig.ScalerConfig) (*cronMetadata, error) { - if len(config.TriggerMetadata) == 0 { - return nil, fmt.Errorf("invalid Input Metadata. %s", config.TriggerMetadata) +func parseCronMetadata(config *scalersconfig.ScalerConfig) (cronMetadata, error) { + meta := cronMetadata{TriggerIndex: config.TriggerIndex} + if err := config.TypedConfig(&meta); err != nil { + return meta, err } - - meta := cronMetadata{} - if val, ok := config.TriggerMetadata["timezone"]; ok && val != "" { - meta.timezone = val - } else { - return nil, fmt.Errorf("no timezone specified. 
%s", config.TriggerMetadata) - } - parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) - if val, ok := config.TriggerMetadata["start"]; ok && val != "" { - _, err := parser.Parse(val) - if err != nil { - return nil, fmt.Errorf("error parsing start schedule: %w", err) - } - meta.start = val - } else { - return nil, fmt.Errorf("no start schedule specified. %s", config.TriggerMetadata) - } - if val, ok := config.TriggerMetadata["end"]; ok && val != "" { - _, err := parser.Parse(val) - if err != nil { - return nil, fmt.Errorf("error parsing end schedule: %w", err) - } - meta.end = val - } else { - return nil, fmt.Errorf("no end schedule specified. %s", config.TriggerMetadata) - } - if meta.start == meta.end { - return nil, fmt.Errorf("error parsing schedule. %s: start and end can not have exactly same time input", config.TriggerMetadata) - } - if val, ok := config.TriggerMetadata["desiredReplicas"]; ok && val != "" { - metadataDesiredReplicas, err := strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("error parsing desiredReplicas metadata. %s", config.TriggerMetadata) - } - - meta.desiredReplicas = int64(metadataDesiredReplicas) - } else { - return nil, fmt.Errorf("no DesiredReplicas specified. 
%s", config.TriggerMetadata) - } - meta.triggerIndex = config.TriggerIndex - return &meta, nil + return meta, nil } func (s *cronScaler) Close(context.Context) error { @@ -132,7 +123,7 @@ func (s *cronScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { var specReplicas int64 = 1 externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("cron-%s-%s-%s", s.metadata.timezone, parseCronTimeFormat(s.metadata.start), parseCronTimeFormat(s.metadata.end)))), + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("cron-%s-%s-%s", s.metadata.Timezone, parseCronTimeFormat(s.metadata.Start), parseCronTimeFormat(s.metadata.End)))), }, Target: GetMetricTarget(s.metricType, specReplicas), } @@ -144,7 +135,7 @@ func (s *cronScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { func (s *cronScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { var defaultDesiredReplicas = int64(defaultDesiredReplicas) - location, err := time.LoadLocation(s.metadata.timezone) + location, err := time.LoadLocation(s.metadata.Timezone) if err != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("unable to load timezone. Error: %w", err) } @@ -152,12 +143,12 @@ func (s *cronScaler) GetMetricsAndActivity(_ context.Context, metricName string) // Since we are considering the timestamp here and not the exact time, timezone does matter. 
currentTime := time.Now().Unix() - nextStartTime, startTimecronErr := getCronTime(location, s.metadata.start) + nextStartTime, startTimecronErr := getCronTime(location, s.metadata.Start) if startTimecronErr != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error initializing start cron: %w", startTimecronErr) } - nextEndTime, endTimecronErr := getCronTime(location, s.metadata.end) + nextEndTime, endTimecronErr := getCronTime(location, s.metadata.End) if endTimecronErr != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error intializing end cron: %w", endTimecronErr) } @@ -167,7 +158,7 @@ func (s *cronScaler) GetMetricsAndActivity(_ context.Context, metricName string) metric := GenerateMetricInMili(metricName, float64(defaultDesiredReplicas)) return []external_metrics.ExternalMetricValue{metric}, false, nil case currentTime <= nextEndTime: - metric := GenerateMetricInMili(metricName, float64(s.metadata.desiredReplicas)) + metric := GenerateMetricInMili(metricName, float64(s.metadata.DesiredReplicas)) return []external_metrics.ExternalMetricValue{metric}, true, nil default: metric := GenerateMetricInMili(metricName, float64(defaultDesiredReplicas)) diff --git a/pkg/scalers/pulsar_scaler.go b/pkg/scalers/pulsar_scaler.go index c9732083113..66015982bd5 100644 --- a/pkg/scalers/pulsar_scaler.go +++ b/pkg/scalers/pulsar_scaler.go @@ -43,7 +43,6 @@ type pulsarMetadata struct { } const ( - msgBacklogMetricName = "msgBacklog" pulsarMetricType = "External" defaultMsgBacklogThreshold = 10 enable = "enable" @@ -133,7 +132,7 @@ func NewPulsarScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { }, nil } -func parsePulsarMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (pulsarMetadata, error) { +func parsePulsarMetadata(config *scalersconfig.ScalerConfig, _ logr.Logger) (pulsarMetadata, error) { meta := pulsarMetadata{} switch { case config.TriggerMetadata["adminURLFromEnv"] != "": @@ -182,23 +181,13 @@ 
func parsePulsarMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) meta.msgBacklogThreshold = defaultMsgBacklogThreshold - // FIXME: msgBacklog support DEPRECATED to be removed in v2.14 - fmt.Println(config.TriggerMetadata) - if val, ok := config.TriggerMetadata[msgBacklogMetricName]; ok { - logger.V(1).Info("\"msgBacklog\" is deprecated and will be removed in v2.14, please use \"msgBacklogThreshold\" instead") + if val, ok := config.TriggerMetadata["msgBacklogThreshold"]; ok { t, err := strconv.ParseInt(val, 10, 64) if err != nil { - return meta, fmt.Errorf("error parsing %s: %w", msgBacklogMetricName, err) - } - meta.msgBacklogThreshold = t - } else if val, ok := config.TriggerMetadata["msgBacklogThreshold"]; ok { - t, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return meta, fmt.Errorf("error parsing %s: %w", msgBacklogMetricName, err) + return meta, fmt.Errorf("error parsing %s: %w", "msgBacklogThreshold", err) } meta.msgBacklogThreshold = t } - // END FIXME // For backwards compatibility, we need to map "tls: enable" to if tls, ok := config.TriggerMetadata["tls"]; ok { @@ -212,7 +201,7 @@ func parsePulsarMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) } auth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams) if err != nil { - return meta, fmt.Errorf("error parsing %s: %w", msgBacklogMetricName, err) + return meta, fmt.Errorf("error parsing %s: %w", "msgBacklogThreshold", err) } if auth != nil && auth.EnableOAuth { diff --git a/pkg/scalers/pulsar_scaler_test.go b/pkg/scalers/pulsar_scaler_test.go index 5c7148cf5b6..68b183488eb 100644 --- a/pkg/scalers/pulsar_scaler_test.go +++ b/pkg/scalers/pulsar_scaler_test.go @@ -156,14 +156,7 @@ func TestParsePulsarMetadata(t *testing.T) { } var testDataMsgBacklogThreshold int64 - // FIXME: msgBacklog support DEPRECATED to be removed in v2.14 - if val, ok := testData.metadata["msgBacklog"]; ok { - testDataMsgBacklogThreshold, err = 
strconv.ParseInt(val, 10, 64) - if err != nil { - t.Errorf("error parseing msgBacklog: %v", err) - } - // END FiXME - } else if val, ok := testData.metadata["msgBacklogThreshold"]; ok { + if val, ok := testData.metadata["msgBacklogThreshold"]; ok { testDataMsgBacklogThreshold, err = strconv.ParseInt(val, 10, 64) if err != nil { t.Errorf("error parseing msgBacklogThreshold: %v", err) diff --git a/tests/internals/events/events_test.go b/tests/internals/events/events_test.go index 5f3c4983696..3f9122d81fe 100644 --- a/tests/internals/events/events_test.go +++ b/tests/internals/events/events_test.go @@ -28,6 +28,10 @@ var ( scaledObjectName = fmt.Sprintf("%s-so", testName) scaledObjectTargetNotFoundName = fmt.Sprintf("%s-so-target-error", testName) scaledObjectTargetNoSubresourceName = fmt.Sprintf("%s-so-target-no-subresource", testName) + secretName = fmt.Sprintf("%s-secret", testName) + secretName2 = fmt.Sprintf("%s-secret-2", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + clusterTriggerAuthName = fmt.Sprintf("%s-cta", testName) scaledJobName = fmt.Sprintf("%s-sj", testName) scaledJobErrName = fmt.Sprintf("%s-sj-target-error", testName) @@ -43,6 +47,11 @@ type templateData struct { DaemonsetName string ScaledJobName string ScaledJobErrName string + SecretName string + SecretName2 string + SecretTargetName string + TriggerAuthName string + ClusterTriggerAuthName string } const ( @@ -223,6 +232,54 @@ spec: typex: Utilization value: "50" ` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + AUTH_PASSWORD: U0VDUkVUCg== + AUTH_USERNAME: VVNFUgo= +` + secret2Template = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName2}} + namespace: {{.TestNamespace}} +data: + AUTH_PASSWORD: U0VDUkVUCg== + AUTH_USERNAME: VVNFUgo= +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: 
{{.TestNamespace}} +spec: + secretTargetRef: + - parameter: username + name: {{.SecretTargetName}} + key: AUTH_USERNAME + - parameter: password + name: {{.SecretTargetName}} + key: AUTH_PASSWORD +` + + clusterTriggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ClusterTriggerAuthentication +metadata: + name: {{.ClusterTriggerAuthName}} +spec: + secretTargetRef: + - parameter: username + name: {{.SecretTargetName}} + key: AUTH_USERNAME + - parameter: password + name: {{.SecretTargetName}} + key: AUTH_PASSWORD +` ) func TestEvents(t *testing.T) { @@ -242,6 +299,9 @@ func TestEvents(t *testing.T) { testScaledJobNormalEvent(t, kc, data) testScaledJobTargetNotSupportEventErr(t, kc, data) + + testTriggerAuthenticationEvent(t, kc, data) + // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) } @@ -257,11 +317,15 @@ func getTemplateData() (templateData, []Template) { ScaledObjectTargetNoSubresourceName: scaledObjectTargetNoSubresourceName, ScaledJobName: scaledJobName, ScaledJobErrName: scaledJobErrName, + SecretName: secretName, + SecretName2: secretName2, + TriggerAuthName: triggerAuthName, + ClusterTriggerAuthName: clusterTriggerAuthName, }, []Template{} } -func checkingEvent(t *testing.T, scaledObject string, index int, eventreason string, message string) { - result, err := ExecuteCommand(fmt.Sprintf("kubectl get events -n %s --field-selector involvedObject.name=%s --sort-by=.metadata.creationTimestamp -o jsonpath=\"{.items[%d].reason}:{.items[%d].message}\"", testNamespace, scaledObject, index, index)) +func checkingEvent(t *testing.T, namespace string, scaledObject string, index int, eventreason string, message string) { + result, err := ExecuteCommand(fmt.Sprintf("kubectl get events -n %s --field-selector involvedObject.name=%s --sort-by=.metadata.creationTimestamp -o jsonpath=\"{.items[%d].reason}:{.items[%d].message}\"", namespace, scaledObject, index, index)) assert.NoError(t, err) lastEventMessage := strings.Trim(string(result), 
"\"") @@ -279,9 +343,9 @@ func testNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templateData) KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 2, testNamespace) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 1), "replica count should be 2 after 1 minute") - checkingEvent(t, scaledObjectName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) - checkingEvent(t, scaledObjectName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) - checkingEvent(t, scaledObjectName, 2, eventreason.ScaledObjectReady, message.ScalerReadyMsg) + checkingEvent(t, testNamespace, scaledObjectName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) + checkingEvent(t, testNamespace, scaledObjectName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) + checkingEvent(t, testNamespace, scaledObjectName, 2, eventreason.ScaledObjectReady, message.ScalerReadyMsg) KubectlDeleteWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) KubectlDeleteWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate) @@ -292,8 +356,8 @@ func testTargetNotFoundErr(t *testing.T, _ *kubernetes.Clientset, data templateD t.Log("--- testing target not found error event ---") KubectlApplyWithTemplate(t, data, "scaledObjectTargetErrTemplate", scaledObjectTargetErrTemplate) - checkingEvent(t, scaledObjectTargetNotFoundName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg) - checkingEvent(t, scaledObjectTargetNotFoundName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg) + checkingEvent(t, testNamespace, scaledObjectTargetNotFoundName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg) + checkingEvent(t, testNamespace, scaledObjectTargetNotFoundName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg) } func testTargetNotSupportEventErr(t 
*testing.T, _ *kubernetes.Clientset, data templateData) { @@ -301,8 +365,8 @@ func testTargetNotSupportEventErr(t *testing.T, _ *kubernetes.Clientset, data te KubectlApplyWithTemplate(t, data, "daemonSetTemplate", daemonSetTemplate) KubectlApplyWithTemplate(t, data, "scaledObjectTargetNotSupportTemplate", scaledObjectTargetNotSupportTemplate) - checkingEvent(t, scaledObjectTargetNoSubresourceName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg) - checkingEvent(t, scaledObjectTargetNoSubresourceName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg) + checkingEvent(t, testNamespace, scaledObjectTargetNoSubresourceName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg) + checkingEvent(t, testNamespace, scaledObjectTargetNoSubresourceName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg) } func testScaledJobNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -315,9 +379,9 @@ func testScaledJobNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templ KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 2, testNamespace) assert.True(t, WaitForJobCount(t, kc, testNamespace, 2, 60, 1), "replica count should be 2 after 1 minute") - checkingEvent(t, scaledJobName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) - checkingEvent(t, scaledJobName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) - checkingEvent(t, scaledJobName, 2, eventreason.ScaledJobReady, message.ScaledJobReadyMsg) + checkingEvent(t, testNamespace, scaledJobName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) + checkingEvent(t, testNamespace, scaledJobName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) + checkingEvent(t, testNamespace, scaledJobName, 2, eventreason.ScaledJobReady, message.ScaledJobReadyMsg) KubectlDeleteWithTemplate(t, data, 
"deploymentTemplate", deploymentTemplate) KubectlDeleteWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate) @@ -328,5 +392,33 @@ func testScaledJobTargetNotSupportEventErr(t *testing.T, _ *kubernetes.Clientset t.Log("--- testing target not support error event ---") KubectlApplyWithTemplate(t, data, "scaledJobErrTemplate", scaledJobErrTemplate) - checkingEvent(t, scaledJobErrName, -1, eventreason.ScaledJobCheckFailed, "Failed to ensure ScaledJob is correctly created") + checkingEvent(t, testNamespace, scaledJobErrName, -1, eventreason.ScaledJobCheckFailed, "Failed to ensure ScaledJob is correctly created") +} + +func testTriggerAuthenticationEvent(t *testing.T, _ *kubernetes.Clientset, data templateData) { + t.Log("--- testing ScaledJob normal event ---") + + KubectlApplyWithTemplate(t, data, "secretTemplate", secretTemplate) + KubectlApplyWithTemplate(t, data, "secret2Template", secret2Template) + + data.SecretTargetName = secretName + KubectlApplyWithTemplate(t, data, "triggerAuthenticationTemplate", triggerAuthenticationTemplate) + + checkingEvent(t, testNamespace, triggerAuthName, 0, eventreason.TriggerAuthenticationAdded, message.TriggerAuthenticationCreatedMsg) + + KubectlApplyWithTemplate(t, data, "clusterTriggerAuthenticationTemplate", clusterTriggerAuthenticationTemplate) + + checkingEvent(t, "default", clusterTriggerAuthName, 0, eventreason.ClusterTriggerAuthenticationAdded, message.ClusterTriggerAuthenticationCreatedMsg) + + data.SecretTargetName = secretName2 + KubectlApplyWithTemplate(t, data, "triggerAuthenticationTemplate", triggerAuthenticationTemplate) + + checkingEvent(t, testNamespace, triggerAuthName, -1, eventreason.TriggerAuthenticationUpdated, fmt.Sprintf(message.TriggerAuthenticationUpdatedMsg, triggerAuthName)) + KubectlApplyWithTemplate(t, data, "clusterTriggerAuthenticationTemplate", clusterTriggerAuthenticationTemplate) + + checkingEvent(t, "default", clusterTriggerAuthName, -1, 
eventreason.ClusterTriggerAuthenticationUpdated, fmt.Sprintf(message.ClusterTriggerAuthenticationUpdatedMsg, clusterTriggerAuthName)) + KubectlDeleteWithTemplate(t, data, "secretTemplate", secretTemplate) + KubectlDeleteWithTemplate(t, data, "secret2Template", secret2Template) + KubectlDeleteWithTemplate(t, data, "triggerAuthenticationTemplate", triggerAuthenticationTemplate) + KubectlDeleteWithTemplate(t, data, "clusterTriggerAuthenticationTemplate", clusterTriggerAuthenticationTemplate) }