diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml
index 61d600ec35a..aebf815d561 100644
--- a/.github/workflows/main-build.yml
+++ b/.github/workflows/main-build.yml
@@ -52,7 +52,7 @@ jobs:
           # Username used to log in to a Docker registry. If not set then no login will occur
           username: ${{ github.repository_owner }}
           # Password or personal access token used to log in to a Docker registry. If not set then no login will occur
-          password: ${{ secrets.GHCR_AUTH_PAT }}
+          password: ${{ secrets.GH_AUTOMATION_PAT }}
           # Server address of Docker registry. If not set then will default to Docker Hub
           registry: ghcr.io
diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml
index 72d710d4b2d..1694be2d10e 100644
--- a/.github/workflows/pr-e2e.yml
+++ b/.github/workflows/pr-e2e.yml
@@ -136,7 +136,7 @@ jobs:
           # Username used to log in to a Docker registry. If not set then no login will occur
           username: ${{ github.repository_owner }}
           # Password or personal access token used to log in to a Docker registry. If not set then no login will occur
-          password: ${{ secrets.GHCR_AUTH_PAT }}
+          password: ${{ secrets.GH_AUTOMATION_PAT }}
           # Server address of Docker registry. If not set then will default to Docker Hub
           registry: ghcr.io
@@ -181,7 +181,7 @@ jobs:
       - name: Scale cluster
        run: make scale-node-pool
        env:
-          NODE_POOL_SIZE: 2
+          NODE_POOL_SIZE: 3
          TEST_CLUSTER_NAME: keda-e2e-cluster-pr

      - name: Run end to end tests
diff --git a/.github/workflows/release-build.yml b/.github/workflows/release-build.yml
index 897f78a36fb..39f2b48198c 100644
--- a/.github/workflows/release-build.yml
+++ b/.github/workflows/release-build.yml
@@ -49,7 +49,7 @@ jobs:
           # Username used to log in to a Docker registry. If not set then no login will occur
           username: ${{ github.repository_owner }}
           # Password or personal access token used to log in to a Docker registry. If not set then no login will occur
-          password: ${{ secrets.GHCR_AUTH_PAT }}
+          password: ${{ secrets.GH_AUTOMATION_PAT }}
           # Server address of Docker registry. If not set then will default to Docker Hub
           registry: ghcr.io
diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml
index d6dbb992e5a..88d4504bcd3 100644
--- a/.github/workflows/template-main-e2e-test.yml
+++ b/.github/workflows/template-main-e2e-test.yml
@@ -26,7 +26,7 @@ jobs:
       - name: Scale cluster
        run: make scale-node-pool
        env:
-          NODE_POOL_SIZE: 2
+          NODE_POOL_SIZE: 3

      - name: Run end to end tests
        env:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 484448d3a80..7417d9111c4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,8 +57,12 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 ### New

+- **General**: Add the generateEmbeddedObjectMeta flag to generate meta properties of JobTargetRef in ScaledJob ([#5908](https://github.com/kedacore/keda/issues/5908))
 - **General**: Cache miss fallback in validating webhook for ScaledObjects with direct kubernetes client ([#5973](https://github.com/kedacore/keda/issues/5973))
+- **General**: Introduce new Beanstalkd scaler ([#5901](https://github.com/kedacore/keda/issues/5901))
 - **General**: Introduce new NSQ Scaler ([#3281](https://github.com/kedacore/keda/issues/3281))
+- **General**: Replace wildcards in RBAC objects with explicit resources and verbs ([#6129](https://github.com/kedacore/keda/pull/6129))
+- **Azure Pipelines Scaler**: Print a warning to the log when Azure DevOps API rate limits are (nearly) reached ([#6284](https://github.com/kedacore/keda/issues/6284))
 - **CloudEventSource**: Introduce ClusterCloudEventSource ([#3533](https://github.com/kedacore/keda/issues/3533))
 - **CloudEventSource**: Provide ClusterCloudEventSource around the management of ScaledJobs resources ([#3523](https://github.com/kedacore/keda/issues/3523))
 - **CloudEventSource**: Provide ClusterCloudEventSource around the management of TriggerAuthentication/ClusterTriggerAuthentication resources ([#3524](https://github.com/kedacore/keda/issues/3524))
@@ -73,22 +77,28 @@ Here is an overview of all new **experimental** features:

 ### Improvements

+- **General**: Prevent multiple ScaledObjects managing one HPA ([#6130](https://github.com/kedacore/keda/issues/6130))
+- **General**: Show full triggers' types and authentications' types in status ([#6187](https://github.com/kedacore/keda/issues/6187))
 - **AWS CloudWatch Scaler**: Add support for ignoreNullValues ([#5352](https://github.com/kedacore/keda/issues/5352))
 - **Elasticsearch Scaler**: Support Query at the Elasticsearch scaler ([#6216](https://github.com/kedacore/keda/issues/6216))
 - **Etcd Scaler**: Add username and password support for etcd ([#6199](https://github.com/kedacore/keda/pull/6199))
 - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778))
+- **GitHub Scaler**: Add support to not scale on default runner labels ([#6127](https://github.com/kedacore/keda/issues/6127))
 - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738))
 - **Grafana dashboard**: Fix dashboard to handle wildcard scaledObject variables ([#6214](https://github.com/kedacore/keda/issues/6214))
 - **Kafka**: Allow disabling FAST negotiation when using Kerberos ([#6188](https://github.com/kedacore/keda/issues/6188))
 - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689))
 - **RabbitMQ Scaler**: Add connection name for AMQP ([#5958](https://github.com/kedacore/keda/issues/5958))
-- **Selenium Scaler**: Add Support for Username and Password Authentication ([#6144](https://github.com/kedacore/keda/issues/6144))
-- **Selenium Scaler**: Introduce new parameters setSessionsFromHub, sessionsPerNode and sessionBrowserVersion. ([#6080](https://github.com/kedacore/keda/issues/6080))
+- **Selenium Grid Scaler**: Add optional auth parameters `username`, `password`, `authType`, `accessToken` to configure a secure GraphQL endpoint ([#6144](https://github.com/kedacore/keda/issues/6144))
+- **Selenium Grid Scaler**: Add parameter `nodeMaxSessions` to configure scaler sync with `--max-sessions` capacity in the Node ([#6080](https://github.com/kedacore/keda/issues/6080))
+- **Selenium Grid Scaler**: Improve logic based on node stereotypes, node sessions and queue requests capabilities ([#6080](https://github.com/kedacore/keda/issues/6080))
 - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))

 ### Fixes

+- **General**: Scalers cache uses a mutex to prevent concurrent actions ([#6273](https://github.com/kedacore/keda/issues/6273))
 - **AWS Secret Manager**: Pod identity overrides are honored ([#6195](https://github.com/kedacore/keda/issues/6195))
+- **AWS SQS Scaler**: Improve error handling for SQS queue metrics ([#6178](https://github.com/kedacore/keda/issues/6178))
 - **Azure Event Hub Scaler**: Checkpointer errors are correctly handled ([#6084](https://github.com/kedacore/keda/issues/6084))
 - **Metrics API Scaler**: Prometheus metrics can have multiple labels ([#6077](https://github.com/kedacore/keda/issues/6077))
diff --git a/Makefile b/Makefile
index 4ee8b59f2df..22c8c707608 100644
--- a/Makefile
+++ b/Makefile
@@ -131,7 +131,7 @@ smoke-test: ## Run e2e tests against Kubernetes cluster configured in ~/.kube/co
 ##@ Development

 manifests: controller-gen ## Generate ClusterRole and CustomResourceDefinition objects.
-	$(CONTROLLER_GEN) crd:crdVersions=v1 rbac:roleName=keda-operator paths="./..." output:crd:artifacts:config=config/crd/bases
+	$(CONTROLLER_GEN) crd:crdVersions=v1,generateEmbeddedObjectMeta=true rbac:roleName=keda-operator paths="./..." output:crd:artifacts:config=config/crd/bases
 # withTriggers is only used for duck typing so we only need the deepcopy methods
 # However operator-sdk generate doesn't appear to have an option for that
 # until this issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/398
diff --git a/apis/keda/v1alpha1/scaledjob_types.go b/apis/keda/v1alpha1/scaledjob_types.go
index 366f22a14cd..deab161df6a 100644
--- a/apis/keda/v1alpha1/scaledjob_types.go
+++ b/apis/keda/v1alpha1/scaledjob_types.go
@@ -32,11 +32,11 @@ const (
 // +kubebuilder:resource:path=scaledjobs,scope=Namespaced,shortName=sj
 // +kubebuilder:printcolumn:name="Min",type="integer",JSONPath=".spec.minReplicaCount"
 // +kubebuilder:printcolumn:name="Max",type="integer",JSONPath=".spec.maxReplicaCount"
-// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".spec.triggers[*].type"
-// +kubebuilder:printcolumn:name="Authentication",type="string",JSONPath=".spec.triggers[*].authenticationRef.name"
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status"
 // +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status"
 // +kubebuilder:printcolumn:name="Paused",type="string",JSONPath=".status.conditions[?(@.type==\"Paused\")].status"
+// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".status.triggersTypes"
+// +kubebuilder:printcolumn:name="Authentications",type="string",JSONPath=".status.authenticationsTypes"
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

 // ScaledJob is the Schema for the scaledjobs API
@@ -81,6 +81,10 @@ type ScaledJobStatus struct {
     Conditions Conditions `json:"conditions,omitempty"`
     // +optional
     Paused string `json:"Paused,omitempty"`
+    // +optional
+    TriggersTypes *string `json:"triggersTypes,omitempty"`
+    // +optional
+    AuthenticationsTypes *string `json:"authenticationsTypes,omitempty"`
 }

 // ScaledJobList contains a list of ScaledJob
diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go
index f73de8e0a69..37c5e640963 100644
--- a/apis/keda/v1alpha1/scaledobject_types.go
+++ b/apis/keda/v1alpha1/scaledobject_types.go
@@ -33,12 +33,12 @@ import (
 // +kubebuilder:printcolumn:name="ScaleTargetName",type="string",JSONPath=".spec.scaleTargetRef.name"
 // +kubebuilder:printcolumn:name="Min",type="integer",JSONPath=".spec.minReplicaCount"
 // +kubebuilder:printcolumn:name="Max",type="integer",JSONPath=".spec.maxReplicaCount"
-// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".spec.triggers[*].type"
-// +kubebuilder:printcolumn:name="Authentication",type="string",JSONPath=".spec.triggers[*].authenticationRef.name"
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status"
 // +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status"
 // +kubebuilder:printcolumn:name="Fallback",type="string",JSONPath=".status.conditions[?(@.type==\"Fallback\")].status"
 // +kubebuilder:printcolumn:name="Paused",type="string",JSONPath=".status.conditions[?(@.type==\"Paused\")].status"
+// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".status.triggersTypes"
+// +kubebuilder:printcolumn:name="Authentications",type="string",JSONPath=".status.authenticationsTypes"
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

 // ScaledObject is a specification for a ScaledObject resource
@@ -177,6 +177,10 @@ type ScaledObjectStatus struct {
     PausedReplicaCount *int32 `json:"pausedReplicaCount,omitempty"`
     // +optional
     HpaName string `json:"hpaName,omitempty"`
+    // +optional
+    TriggersTypes *string `json:"triggersTypes,omitempty"`
+    // +optional
+    AuthenticationsTypes *string `json:"authenticationsTypes,omitempty"`
 }

 // +kubebuilder:object:root=true
diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go
index b96c984445b..41bd0355596 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook.go
@@ -295,6 +295,7 @@ func verifyScaledObjects(incomingSo *ScaledObject, action string, _ bool) error
         return err
     }

+    incomingSoHpaName := getHpaName(*incomingSo)
     for _, so := range soList.Items {
         if so.Name == incomingSo.Name {
             continue
@@ -315,6 +316,13 @@ func verifyScaledObjects(incomingSo *ScaledObject, action string, _ bool) error
             metricscollector.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "other-scaled-object")
             return err
         }
+
+        if getHpaName(so) == incomingSoHpaName {
+            // use the computed name here: so.Spec.Advanced may be nil when the default HPA name collides
+            err = fmt.Errorf("the HPA '%s' is already managed by the ScaledObject '%s'", incomingSoHpaName, so.Name)
+            scaledobjectlog.Error(err, "validation error")
+            metricscollector.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "other-scaled-object-hpa")
+            return err
+        }
     }

     // verify ScalingModifiers structure if defined in ScaledObject
@@ -572,3 +580,11 @@ func isContainerResourceLimitSet(ctx context.Context, namespace string, triggerT

     return false
 }
+
+func getHpaName(so ScaledObject) string {
+    if so.Spec.Advanced == nil || so.Spec.Advanced.HorizontalPodAutoscalerConfig == nil || so.Spec.Advanced.HorizontalPodAutoscalerConfig.Name == "" {
+        return fmt.Sprintf("keda-hpa-%s", so.Name)
+    }
+
+    return so.Spec.Advanced.HorizontalPodAutoscalerConfig.Name
+}
diff --git a/apis/keda/v1alpha1/scaletriggers_types.go b/apis/keda/v1alpha1/scaletriggers_types.go
index 584dfa8b614..78c411966f9 100644
--- a/apis/keda/v1alpha1/scaletriggers_types.go
+++ b/apis/keda/v1alpha1/scaletriggers_types.go
@@ -18,6 +18,8 @@ package v1alpha1

 import (
     "fmt"
+    "slices"
+    "strings"

     autoscalingv2 "k8s.io/api/autoscaling/v2"
 )
@@ -80,3 +82,18 @@ func ValidateTriggers(triggers []ScaleTriggers) error {

     return nil
 }
+
+// CombinedTriggersAndAuthenticationsTypes returns a comma separated string of all trigger types and authentication types
+func CombinedTriggersAndAuthenticationsTypes(triggers []ScaleTriggers) (string, string) {
+    var triggersTypes []string
+    var authTypes []string
+    for _, trigger := range triggers {
+        if !slices.Contains(triggersTypes, trigger.Type) {
+            triggersTypes = append(triggersTypes, trigger.Type)
+        }
+        if trigger.AuthenticationRef != nil && !slices.Contains(authTypes, trigger.AuthenticationRef.Name) {
+            authTypes = append(authTypes, trigger.AuthenticationRef.Name)
+        }
+    }
+    return strings.Join(triggersTypes, ","), strings.Join(authTypes, ",")
+}
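Annotation: the new exported helper above is small enough to sanity-check in isolation. A minimal standalone sketch of its observable behavior (illustrative only, not part of the PR; assumes the `kedav1alpha1` package layout shown in this diff, including the `AuthenticationRef` type):

```go
package main

import (
	"fmt"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
)

func main() {
	// CombinedTriggersAndAuthenticationsTypes deduplicates while preserving
	// first-seen order, so two cron triggers collapse into a single entry.
	triggers := []kedav1alpha1.ScaleTriggers{
		{Type: "cron"},
		{Type: "kafka", AuthenticationRef: &kedav1alpha1.AuthenticationRef{Name: "kafka-auth"}},
		{Type: "cron"},
	}
	triggerTypes, authTypes := kedav1alpha1.CombinedTriggersAndAuthenticationsTypes(triggers)
	fmt.Println(triggerTypes) // cron,kafka
	fmt.Println(authTypes)    // kafka-auth
}
```

The unexported `getHpaName` mirrors KEDA's defaulting rule when creating the HPA — `keda-hpa-<scaledobject-name>` unless `spec.advanced.horizontalPodAutoscalerConfig.name` is set — which is what lets the webhook compare incoming and existing ScaledObjects by their effective HPA name.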
diff --git a/apis/keda/v1alpha1/zz_generated.deepcopy.go b/apis/keda/v1alpha1/zz_generated.deepcopy.go
index a144aeb07d3..a6e01a22f4f 100755
--- a/apis/keda/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/keda/v1alpha1/zz_generated.deepcopy.go
@@ -816,6 +816,16 @@ func (in *ScaledJobStatus) DeepCopyInto(out *ScaledJobStatus) {
         *out = make(Conditions, len(*in))
         copy(*out, *in)
     }
+    if in.TriggersTypes != nil {
+        in, out := &in.TriggersTypes, &out.TriggersTypes
+        *out = new(string)
+        **out = **in
+    }
+    if in.AuthenticationsTypes != nil {
+        in, out := &in.AuthenticationsTypes, &out.AuthenticationsTypes
+        *out = new(string)
+        **out = **in
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledJobStatus.
@@ -1008,6 +1018,16 @@ func (in *ScaledObjectStatus) DeepCopyInto(out *ScaledObjectStatus) {
         *out = new(int32)
         **out = **in
     }
+    if in.TriggersTypes != nil {
+        in, out := &in.TriggersTypes, &out.TriggersTypes
+        *out = new(string)
+        **out = **in
+    }
+    if in.AuthenticationsTypes != nil {
+        in, out := &in.AuthenticationsTypes, &out.AuthenticationsTypes
+        *out = new(string)
+        **out = **in
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectStatus.
diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml
index 5ccf72f2d46..b093d3fe25f 100644
--- a/config/crd/bases/keda.sh_scaledjobs.yaml
+++ b/config/crd/bases/keda.sh_scaledjobs.yaml
@@ -23,12 +23,6 @@ spec:
     - jsonPath: .spec.maxReplicaCount
       name: Max
       type: integer
-    - jsonPath: .spec.triggers[*].type
-      name: Triggers
-      type: string
-    - jsonPath: .spec.triggers[*].authenticationRef.name
-      name: Authentication
-      type: string
     - jsonPath: .status.conditions[?(@.type=="Ready")].status
       name: Ready
       type: string
@@ -38,6 +32,12 @@ spec:
     - jsonPath: .status.conditions[?(@.type=="Paused")].status
       name: Paused
       type: string
+    - jsonPath: .status.triggersTypes
+      name: Triggers
+      type: string
+    - jsonPath: .status.authenticationsTypes
+      name: Authentications
+      type: string
     - jsonPath: .metadata.creationTimestamp
       name: Age
       type: date
@@ -380,6 +380,23 @@ spec:
                 description: |-
                   Standard object's metadata.
                   More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    type: object
+                  finalizers:
+                    items:
+                      type: string
+                    type: array
+                  labels:
+                    additionalProperties:
+                      type: string
+                    type: object
+                  name:
+                    type: string
+                  namespace:
+                    type: string
                 type: object
               spec:
                 description: |-
@@ -6684,6 +6701,23 @@ spec:
                           May contain labels and annotations that will be copied into the PVC
                           when creating it. No other fields are allowed and will be rejected during
                           validation.
+                          properties:
+                            annotations:
+                              additionalProperties:
+                                type: string
+                              type: object
+                            finalizers:
+                              items:
+                                type: string
+                              type: array
+                            labels:
+                              additionalProperties:
+                                type: string
+                              type: object
+                            name:
+                              type: string
+                            namespace:
+                              type: string
                           type: object
                         spec:
                           description: |-
@@ -8028,6 +8062,8 @@ spec:
             properties:
               Paused:
                 type: string
+              authenticationsTypes:
+                type: string
               conditions:
                 description: Conditions an array representation to store multiple
                   Conditions
@@ -8055,6 +8091,8 @@ spec:
               lastActiveTime:
                 format: date-time
                 type: string
+              triggersTypes:
+                type: string
             type: object
         type: object
     served: true
diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml
index c4ded761ccf..779831c2f80 100644
--- a/config/crd/bases/keda.sh_scaledobjects.yaml
+++ b/config/crd/bases/keda.sh_scaledobjects.yaml
@@ -29,12 +29,6 @@ spec:
     - jsonPath: .spec.maxReplicaCount
       name: Max
       type: integer
-    - jsonPath: .spec.triggers[*].type
-      name: Triggers
-      type: string
-    - jsonPath: .spec.triggers[*].authenticationRef.name
-      name: Authentication
-      type: string
     - jsonPath: .status.conditions[?(@.type=="Ready")].status
       name: Ready
       type: string
@@ -47,6 +41,12 @@ spec:
     - jsonPath: .status.conditions[?(@.type=="Paused")].status
       name: Paused
      type: string
+    - jsonPath: .status.triggersTypes
+      name: Triggers
+      type: string
+    - jsonPath: .status.authenticationsTypes
+      name: Authentications
+      type: string
     - jsonPath: .metadata.creationTimestamp
       name: Age
       type: date
@@ -309,6 +309,8 @@ spec:
           status:
             description: ScaledObjectStatus is the status for a ScaledObject resource
             properties:
+              authenticationsTypes:
+                type: string
               compositeScalerName:
                 type: string
               conditions:
@@ -387,6 +389,8 @@ spec:
                 type: object
               scaleTargetKind:
                 type: string
+              triggersTypes:
+                type: string
             type: object
         required:
         - spec
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index fd9cf99b941..f8bb706592c 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -18,7 +18,8 @@ rules:
   resources:
   - events
   verbs:
-  - '*'
+  - create
+  - patch
 - apiGroups:
   - ""
   resources:
@@ -93,34 +94,58 @@ rules:
   resources:
   - horizontalpodautoscalers
   verbs:
-  - '*'
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - batch
   resources:
   - jobs
   verbs:
-  - '*'
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - eventing.keda.sh
   resources:
   - cloudeventsources
   - cloudeventsources/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - eventing.keda.sh
   resources:
   - clustercloudeventsources
   - clustercloudeventsources/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - keda.sh
   resources:
   - clustertriggerauthentications
   - clustertriggerauthentications/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - keda.sh
   resources:
@@ -128,7 +153,11 @@ rules:
   - scaledjobs
   - scaledjobs/finalizers
   - scaledjobs/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - keda.sh
   resources:
@@ -136,14 +165,22 @@ rules:
   - scaledobjects
   - scaledobjects/finalizers
   - scaledobjects/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - keda.sh
   resources:
   - triggerauthentications
   - triggerauthentications/status
   verbs:
-  - '*'
+  - get
+  - list
+  - patch
+  - update
+  - watch
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -168,4 +205,10 @@ rules:
   resources:
   - leases
   verbs:
-  - '*'
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
diff --git a/controllers/eventing/cloudeventsource_controller.go b/controllers/eventing/cloudeventsource_controller.go
index 5bb78f5e9ca..0a4c2e6a523 100644
--- a/controllers/eventing/cloudeventsource_controller.go
+++ b/controllers/eventing/cloudeventsource_controller.go
@@ -54,7 +54,7 @@ func NewCloudEventSourceReconciler(c client.Client, e eventemitter.EventHandler)
     }
 }

-// +kubebuilder:rbac:groups=eventing.keda.sh,resources=cloudeventsources;cloudeventsources/status,verbs="*"
+// +kubebuilder:rbac:groups=eventing.keda.sh,resources=cloudeventsources;cloudeventsources/status,verbs=get;list;watch;update;patch

 // Reconcile performs reconciliation on the identified EventSource resource based on the request information passed, returns the result and an error (if any).
diff --git a/controllers/eventing/clustercloudeventsource_controller.go b/controllers/eventing/clustercloudeventsource_controller.go
index 0ccb26f811a..2204f18f0ca 100644
--- a/controllers/eventing/clustercloudeventsource_controller.go
+++ b/controllers/eventing/clustercloudeventsource_controller.go
@@ -54,7 +54,7 @@ func NewClusterCloudEventSourceReconciler(c client.Client, e eventemitter.EventH
     }
 }

-// +kubebuilder:rbac:groups=eventing.keda.sh,resources=clustercloudeventsources;clustercloudeventsources/status,verbs="*"
+// +kubebuilder:rbac:groups=eventing.keda.sh,resources=clustercloudeventsources;clustercloudeventsources/status,verbs=get;list;watch;update;patch

 // Reconcile performs reconciliation on the identified EventSource resource based on the request information passed, returns the result and an error (if any).
 func (r *ClusterCloudEventSourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
diff --git a/controllers/keda/clustertriggerauthentication_controller.go b/controllers/keda/clustertriggerauthentication_controller.go
index aabab91c4c3..a8d7718416d 100644
--- a/controllers/keda/clustertriggerauthentication_controller.go
+++ b/controllers/keda/clustertriggerauthentication_controller.go
@@ -57,7 +57,7 @@ func init() {
     clusterTriggerAuthPromMetricsLock = &sync.Mutex{}
 }

-// +kubebuilder:rbac:groups=keda.sh,resources=clustertriggerauthentications;clustertriggerauthentications/status,verbs="*"
+// +kubebuilder:rbac:groups=keda.sh,resources=clustertriggerauthentications;clustertriggerauthentications/status,verbs=get;list;watch;update;patch

 // Reconcile performs reconciliation on the identified TriggerAuthentication resource based on the request information passed, returns the result and an error (if any).
 func (r *ClusterTriggerAuthenticationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go
index 4a145c7c024..7ffc6ed04f2 100755
--- a/controllers/keda/scaledjob_controller.go
+++ b/controllers/keda/scaledjob_controller.go
@@ -50,8 +50,8 @@ import (
     "github.com/kedacore/keda/v2/pkg/util"
 )

-// +kubebuilder:rbac:groups=keda.sh,resources=scaledjobs;scaledjobs/finalizers;scaledjobs/status,verbs="*"
-// +kubebuilder:rbac:groups=batch,resources=jobs,verbs="*"
+// +kubebuilder:rbac:groups=keda.sh,resources=scaledjobs;scaledjobs/finalizers;scaledjobs/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;update;patch;create;delete

 // ScaledJobReconciler reconciles a ScaledJob object
 type ScaledJobReconciler struct {
@@ -191,6 +191,11 @@ func (r *ScaledJobReconciler) reconcileScaledJob(ctx context.Context, logger log
         return "ScaledJob doesn't have correct triggers specification", err
     }

+    err = r.updateStatusWithTriggersAndAuthsTypes(ctx, logger, scaledJob)
+    if err != nil {
+        return "Cannot update ScaledJob status with triggers' types and authentications' types", err
+    }
+
     // nosemgrep: trailofbits.go.invalid-usage-of-modified-variable.invalid-usage-of-modified-variable
     msg, err := r.deletePreviousVersionScaleJobs(ctx, logger, scaledJob)
     if err != nil {
@@ -404,3 +409,14 @@ func (r *ScaledJobReconciler) updateTriggerAuthenticationStatusOnDelete(ctx cont
         return triggerAuthenticationStatus
     })
 }
+
+func (r *ScaledJobReconciler) updateStatusWithTriggersAndAuthsTypes(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
+    triggersTypes, authsTypes := kedav1alpha1.CombinedTriggersAndAuthenticationsTypes(scaledJob.Spec.Triggers)
+    status := scaledJob.Status.DeepCopy()
+    status.TriggersTypes = &triggersTypes
+    status.AuthenticationsTypes = &authsTypes
+
+    logger.V(1).Info("Updating ScaledJob status with triggers and authentications types", "triggersTypes", triggersTypes, "authenticationsTypes", authsTypes)
+
+    return kedastatus.UpdateScaledJobStatus(ctx, r.Client, logger, scaledJob, status)
+}
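Annotation: `updateStatusWithTriggersAndAuthsTypes` deep-copies the status before mutating it. A simplified, standalone sketch (not KEDA code) of why that matters for objects served from the controller-runtime cache:

```go
package main

import "fmt"

// Deep-copy the status, mutate the copy, then hand the copy to the update
// helper: this way the shared cache object is never mutated in place, even
// if the update call later fails.
type jobStatus struct {
	TriggersTypes *string
}

func (s *jobStatus) deepCopy() *jobStatus {
	out := &jobStatus{}
	if s.TriggersTypes != nil {
		v := *s.TriggersTypes
		out.TriggersTypes = &v
	}
	return out
}

func main() {
	cached := &jobStatus{}       // stands in for the informer-cache object
	working := cached.deepCopy() // the reconciler works on a private copy
	types := "kafka,cron"        // taking the address of a local is safe in Go; it escapes to the heap
	working.TriggersTypes = &types
	fmt.Println(cached.TriggersTypes == nil, *working.TriggersTypes) // true kafka,cron
}
```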
diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go
index b18c84ae61d..ff6194c4ea7 100755
--- a/controllers/keda/scaledobject_controller.go
+++ b/controllers/keda/scaledobject_controller.go
@@ -54,16 +54,16 @@ import (
     "github.com/kedacore/keda/v2/pkg/util"
 )

-// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*"
-// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs="*"
+// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;update;patch;create;delete
 // +kubebuilder:rbac:groups="",resources=configmaps;configmaps/status,verbs=get;list;watch
-// +kubebuilder:rbac:groups="",resources=events,verbs="*"
+// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
 // +kubebuilder:rbac:groups="",resources=pods;services;services;secrets;external,verbs=get;list;watch
 // +kubebuilder:rbac:groups="*",resources="*/scale",verbs=get;list;watch;update;patch
 // +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs=list;watch
 // +kubebuilder:rbac:groups="*",resources="*",verbs=get
 // +kubebuilder:rbac:groups="apps",resources=deployments;statefulsets,verbs=list;watch
-// +kubebuilder:rbac:groups="coordination.k8s.io",namespace=keda,resources=leases,verbs="*"
+// +kubebuilder:rbac:groups="coordination.k8s.io",namespace=keda,resources=leases,verbs=get;list;watch;update;patch;create;delete
 // +kubebuilder:rbac:groups="",resources="limitranges",verbs=list;watch

 // ScaledObjectReconciler reconciles a ScaledObject object
@@ -282,6 +282,11 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg
         return "ScaledObject doesn't have correct triggers specification", err
     }

+    err = r.updateStatusWithTriggersAndAuthsTypes(ctx, logger, scaledObject)
+    if err != nil {
+        return "Cannot update ScaledObject status with triggers' types and authentications' types", err
+    }
+
     // Create a new HPA or update existing one according to ScaledObject
     newHPACreated, err := r.ensureHPAForScaledObjectExists(ctx, logger, scaledObject, &gvkr)
     if err != nil {
@@ -621,3 +626,14 @@ func (r *ScaledObjectReconciler) updateTriggerAuthenticationStatusOnDelete(ctx c
         return triggerAuthenticationStatus
     })
 }
+
+func (r *ScaledObjectReconciler) updateStatusWithTriggersAndAuthsTypes(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
+    triggersTypes, authsTypes := kedav1alpha1.CombinedTriggersAndAuthenticationsTypes(scaledObject.Spec.Triggers)
+    status := scaledObject.Status.DeepCopy()
+    status.TriggersTypes = &triggersTypes
+    status.AuthenticationsTypes = &authsTypes
+
+    logger.V(1).Info("Updating ScaledObject status with triggers and authentications types", "triggersTypes", triggersTypes, "authenticationsTypes", authsTypes)
+
+    return kedastatus.UpdateScaledObjectStatus(ctx, r.Client, logger, scaledObject, status)
+}
diff --git a/controllers/keda/triggerauthentication_controller.go b/controllers/keda/triggerauthentication_controller.go
index b5ab9e1bd82..2627c6683b1 100755
--- a/controllers/keda/triggerauthentication_controller.go
+++ b/controllers/keda/triggerauthentication_controller.go
@@ -58,7 +58,7 @@ func init() {
     triggerAuthPromMetricsLock = &sync.Mutex{}
 }

-// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs="*"
+// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs=get;list;watch;update;patch

 // Reconcile performs reconciliation on the identified TriggerAuthentication resource based on the request information passed, returns the result and an error (if any).
 func (r *TriggerAuthenticationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
diff --git a/go.mod b/go.mod
index 4322a6cafee..8ae68b7c5c4 100644
--- a/go.mod
+++ b/go.mod
@@ -117,6 +117,7 @@ require (
     sigs.k8s.io/controller-tools v0.15.0
     sigs.k8s.io/custom-metrics-apiserver v1.29.0
     sigs.k8s.io/kustomize/kustomize/v5 v5.4.3
+    github.com/beanstalkd/go-beanstalk v0.2.0
 )

 // Remove this when they merge the PR and cut a release https://github.com/open-policy-agent/cert-controller/pull/202
diff --git a/go.sum b/go.sum
index e7cefad3489..5db432af5c9 100644
--- a/go.sum
+++ b/go.sum
@@ -951,6 +951,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzx
 github.com/aws/smithy-go v1.13.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
 github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/beanstalkd/go-beanstalk v0.2.0 h1:6UOJugnu47uNB2jJO/lxyDgeD1Yds7owYi1USELqexA=
+github.com/beanstalkd/go-beanstalk v0.2.0/go.mod h1:/G8YTyChOtpOArwLTQPY1CHB+i212+av35bkPXXj56Y=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go
index 1c5976685fb..1de9bf7f285 100644
--- a/pkg/scalers/aws_sqs_queue_scaler.go
+++ b/pkg/scalers/aws_sqs_queue_scaler.go
@@ -20,6 +20,7 @@ import (
 )

 const (
+    defaultTargetQueueLength           = 5
     targetQueueLengthDefault           = 5
     activationTargetQueueLengthDefault = 0
     defaultScaleOnInFlight             = true
@@ -243,12 +244,23 @@ func (s *awsSqsQueueScaler) getAwsSqsQueueLength(ctx context.Context) (int64, er
         return -1, err
     }

+    return s.processQueueLengthFromSqsQueueAttributesOutput(output)
+}
+
+func (s *awsSqsQueueScaler) processQueueLengthFromSqsQueueAttributesOutput(output *sqs.GetQueueAttributesOutput) (int64, error) {
     var approximateNumberOfMessages int64
+
     for _, awsSqsQueueMetric := range s.metadata.awsSqsQueueMetricNames {
-        metricValue, err := strconv.ParseInt(output.Attributes[string(awsSqsQueueMetric)], 10, 32)
+        metricValueString, exists := output.Attributes[string(awsSqsQueueMetric)]
+        if !exists {
+            return -1, fmt.Errorf("metric %s not found in SQS queue attributes", awsSqsQueueMetric)
+        }
+
+        metricValue, err := strconv.ParseInt(metricValueString, 10, 64)
         if err != nil {
             return -1, err
         }
+
         approximateNumberOfMessages += metricValue
     }
diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go
index ce6ca3d5b4d..6e0b065ab07 100644
--- a/pkg/scalers/aws_sqs_queue_scaler_test.go
+++ b/pkg/scalers/aws_sqs_queue_scaler_test.go
@@ -7,6 +7,7 @@ import (
     "testing"

     "github.com/aws/aws-sdk-go-v2/service/sqs"
+    "github.com/aws/aws-sdk-go-v2/service/sqs/types"
     "github.com/go-logr/logr"
     "github.com/stretchr/testify/assert"

@@ -444,3 +445,91 @@ func TestAWSSQSScalerGetMetrics(t *testing.T) {
         }
     }
 }
+
+func TestProcessQueueLengthFromSqsQueueAttributesOutput(t *testing.T) {
+    scalerCreationFunc := func() *awsSqsQueueScaler {
+        return &awsSqsQueueScaler{
+            metadata: &awsSqsQueueMetadata{
+                awsSqsQueueMetricNames: []types.QueueAttributeName{types.QueueAttributeNameApproximateNumberOfMessages, types.QueueAttributeNameApproximateNumberOfMessagesNotVisible, types.QueueAttributeNameApproximateNumberOfMessagesDelayed},
+            },
+        }
+    }
+
+    tests := map[string]struct {
+        s           *awsSqsQueueScaler
+        attributes  *sqs.GetQueueAttributesOutput
+        expected    int64
+        errExpected bool
+    }{
+        "properly formed queue attributes": {
+            s: scalerCreationFunc(),
+            attributes: &sqs.GetQueueAttributesOutput{
+                Attributes: map[string]string{
+                    "ApproximateNumberOfMessages":           "1",
+                    "ApproximateNumberOfMessagesNotVisible": "0",
+                    "ApproximateNumberOfMessagesDelayed":    "0",
+                },
+            },
+            expected:    1,
+            errExpected: false,
+        },
+        "missing ApproximateNumberOfMessages": {
+            s: scalerCreationFunc(),
+            attributes: &sqs.GetQueueAttributesOutput{
+                Attributes: map[string]string{},
+            },
+            expected:    -1,
+            errExpected: true,
+        },
+        "invalid ApproximateNumberOfMessages": {
+            s: scalerCreationFunc(),
+            attributes: &sqs.GetQueueAttributesOutput{
+                Attributes: map[string]string{
+                    "ApproximateNumberOfMessages":           "NotInt",
+                    "ApproximateNumberOfMessagesNotVisible": "0",
+                    "ApproximateNumberOfMessagesDelayed":    "0",
+                },
+            },
+            expected:    -1,
+            errExpected: true,
+        },
+        "32 bit int upper bound": {
+            s: scalerCreationFunc(),
+            attributes: &sqs.GetQueueAttributesOutput{
+                Attributes: map[string]string{
+                    "ApproximateNumberOfMessages":           "2147483647",
+                    "ApproximateNumberOfMessagesNotVisible": "0",
+                    "ApproximateNumberOfMessagesDelayed":    "0",
+                },
+            },
+            expected:    2147483647,
+            errExpected: false,
+        },
+        "32 bit int upper bound + 1": {
+            s: scalerCreationFunc(),
+            attributes: &sqs.GetQueueAttributesOutput{
+                Attributes: map[string]string{
+                    "ApproximateNumberOfMessages":           "2147483648",
+                    "ApproximateNumberOfMessagesNotVisible": "0",
+                    "ApproximateNumberOfMessagesDelayed":    "0",
+                },
+            },
+            expected:    2147483648,
+            errExpected: false,
+        },
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            result, err := test.s.processQueueLengthFromSqsQueueAttributesOutput(test.attributes)
+
+            if test.errExpected {
+                assert.Error(t, err)
+            } else {
+                assert.NoError(t, err)
+            }
+
+            assert.Equal(t, test.expected, result)
+        })
+    }
+}
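Annotation: the "32 bit int upper bound + 1" case pins down the `strconv.ParseInt` bitSize fix above. A standalone reproduction of the old failure mode (illustrative only, not part of the PR):

```go
package main

import (
	"fmt"
	"strconv"
)

// Parsing the SQS attribute with bitSize 32 fails as soon as a queue reports
// more than 2^31-1 messages, while bitSize 64 handles the full range.
func main() {
	v := "2147483648" // math.MaxInt32 + 1
	_, err32 := strconv.ParseInt(v, 10, 32)
	n64, err64 := strconv.ParseInt(v, 10, 64)
	fmt.Println(err32)      // strconv.ParseInt: parsing "2147483648": value out of range
	fmt.Println(n64, err64) // 2147483648 <nil>
}
```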
diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go
index 99e6de0dff4..1c526570d4f 100644
--- a/pkg/scalers/azure_pipelines_scaler.go
+++ b/pkg/scalers/azure_pipelines_scaler.go
@@ -412,6 +412,16 @@ func getAzurePipelineRequest(ctx context.Context, logger logr.Logger, urlString
         return []byte{}, fmt.Errorf("the Azure DevOps REST API returned error. urlString: %s status: %d response: %s", urlString, r.StatusCode, string(b))
     }

+    // Log when API Rate Limits are reached
+    rateLimitRemaining := r.Header[http.CanonicalHeaderKey("X-RateLimit-Remaining")]
+    if rateLimitRemaining != nil {
+        logger.V(1).Info(fmt.Sprintf("Warning: ADO TSTUs Left %s. When reaching zero, requests are delayed; lower the polling interval. See https://learn.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops", rateLimitRemaining))
+    }
+    rateLimitDelay := r.Header[http.CanonicalHeaderKey("X-RateLimit-Delay")]
+    if rateLimitDelay != nil {
+        logger.V(1).Info(fmt.Sprintf("Warning: Request to ADO API is delayed by %s seconds. Sending additional requests will increase the delay until requests are blocked entirely", rateLimitDelay))
+    }
+
     return b, nil
 }
diff --git a/pkg/scalers/azure_pipelines_scaler_test.go b/pkg/scalers/azure_pipelines_scaler_test.go
index f65460ac3b4..18b7ea66ff9 100644
--- a/pkg/scalers/azure_pipelines_scaler_test.go
+++ b/pkg/scalers/azure_pipelines_scaler_test.go
@@ -222,6 +222,34 @@ func TestAzurePipelinesMatchedAgent(t *testing.T) {
     }
 }

+func TestAzurePipelinesDelayed(t *testing.T) {
+    var apiStub = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        // nosemgrep: no-direct-write-to-responsewriter
+        w.Header().Add("X-RateLimit-Limit", "0")
+        // nosemgrep: no-direct-write-to-responsewriter
+        w.Header().Add("X-RateLimit-Delay", "42")
+        w.WriteHeader(http.StatusOK)
+        _, _ = w.Write(buildLoadJSON())
+    }))
+
+    meta := getMatchedAgentMetaData(apiStub.URL)
+
+    mockAzurePipelinesScaler := azurePipelinesScaler{
+        metadata:   meta,
+        httpClient: http.DefaultClient,
+    }
+
+    queueLen, err := mockAzurePipelinesScaler.GetAzurePipelinesQueueLength(context.Background())
+
+    if err != nil {
+        t.Fail()
+    }
+
+    if queueLen < 1 {
+        t.Fail()
+    }
+}
+
 func getDemandJobMetaData(url string) *azurePipelinesMetadata {
     meta := getMatchedAgentMetaData(url)
     meta.parent = ""
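Annotation: the nil checks in the scaler change above rely on map-style header access rather than `Header.Get`. A standalone note on the distinction (illustrative only):

```go
package main

import (
	"fmt"
	"net/http"
)

// Indexing http.Header with http.CanonicalHeaderKey returns the raw []string
// value, which is nil when the header is absent — that nil check is what
// gates the warning. Header.Get would return "" for both "absent" and
// "present but empty", losing that distinction.
func main() {
	h := http.Header{}
	h.Add("X-RateLimit-Delay", "42")
	fmt.Println(h[http.CanonicalHeaderKey("X-RateLimit-Delay")])             // [42]
	fmt.Println(h[http.CanonicalHeaderKey("X-RateLimit-Remaining")] == nil) // true
}
```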
diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go
index 6f642ec04bf..151a37e8e7c 100644
--- a/pkg/scalers/azure_queue_scaler.go
+++ b/pkg/scalers/azure_queue_scaler.go
@@ -19,7 +19,6 @@ package scalers
 import (
     "context"
     "fmt"
-    "strconv"
     "strings"

     "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
@@ -34,37 +33,30 @@ import (
 )

 const (
-    queueLengthMetricName           = "queueLength"
-    activationQueueLengthMetricName = "activationQueueLength"
-    defaultTargetQueueLength        = 5
-    externalMetricType              = "External"
-    QueueLengthStrategyAll          string = "all"
-    QueueLengthStrategyVisibleOnly  string = "visibleonly"
+    externalMetricType             = "External"
+    queueLengthStrategyVisibleOnly = "visibleonly"
 )

-var (
-    maxPeekMessages int32 = 32
-)
+var maxPeekMessages int32 = 32

 type azureQueueScaler struct {
     metricType  v2.MetricTargetType
-    metadata    *azureQueueMetadata
+    metadata    azureQueueMetadata
     queueClient *azqueue.QueueClient
     logger      logr.Logger
 }

 type azureQueueMetadata struct {
-    targetQueueLength           int64
-    activationTargetQueueLength int64
-    queueName                   string
-    connection                  string
-    accountName                 string
-    endpointSuffix              string
-    queueLengthStrategy         string
-    triggerIndex                int
+    ActivationQueueLength int64  `keda:"name=activationQueueLength, order=triggerMetadata, default=0"`
+    QueueName             string `keda:"name=queueName, order=triggerMetadata"`
+    QueueLength           int64  `keda:"name=queueLength, order=triggerMetadata, default=5"`
+    Connection            string `keda:"name=connection, order=authParams;triggerMetadata;resolvedEnv, optional"`
+    AccountName           string `keda:"name=accountName, order=triggerMetadata, optional"`
+    EndpointSuffix        string `keda:"name=endpointSuffix, order=triggerMetadata, optional"`
+    QueueLengthStrategy   string `keda:"name=queueLengthStrategy, order=triggerMetadata, enum=all;visibleonly, default=all"`
+    TriggerIndex          int
 }

-// NewAzureQueueScaler creates a new scaler for queue
 func NewAzureQueueScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {
     metricType, err := GetMetricTargetType(config)
     if err != nil {
@@ -73,14 +65,14 @@ func NewAzureQueueScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {

     logger := InitializeLogger(config, "azure_queue_scaler")

-    meta, podIdentity, err := parseAzureQueueMetadata(config, logger)
+    meta, podIdentity, err := parseAzureQueueMetadata(config)
     if err != nil {
         return nil, fmt.Errorf("error parsing azure queue metadata: %w", err)
     }

-    queueClient, err := azure.GetStorageQueueClient(logger, podIdentity, meta.connection, meta.accountName, meta.endpointSuffix, meta.queueName, config.GlobalHTTPTimeout)
+    queueClient, err := azure.GetStorageQueueClient(logger, podIdentity, meta.Connection, meta.AccountName, meta.EndpointSuffix, meta.QueueName, config.GlobalHTTPTimeout)
     if err != nil {
-        return nil, fmt.Errorf("error creating azure blob client: %w", err)
+        return nil, fmt.Errorf("error creating azure queue client: %w", err)
     }

     return &azureQueueScaler{
@@ -91,56 +83,18 @@ func NewAzureQueueScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {
     }, nil
 }

-func parseAzureQueueMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*azureQueueMetadata, kedav1alpha1.AuthPodIdentity, error) {
+func parseAzureQueueMetadata(config *scalersconfig.ScalerConfig) (azureQueueMetadata, kedav1alpha1.AuthPodIdentity, error) {
     meta := azureQueueMetadata{}
-    meta.targetQueueLength = defaultTargetQueueLength
-
-    if val, ok := config.TriggerMetadata[queueLengthMetricName]; ok {
-        queueLength, err := strconv.ParseInt(val, 10, 64)
-        if err != nil {
-            logger.Error(err, "Error parsing azure queue metadata", "queueLengthMetricName", queueLengthMetricName)
-            return nil, kedav1alpha1.AuthPodIdentity{},
-                fmt.Errorf("error parsing azure queue metadata %s: %w", queueLengthMetricName, err)
-        }
-
-        meta.targetQueueLength = queueLength
-    }
-
-    meta.activationTargetQueueLength = 0
-    if val, ok := config.TriggerMetadata[activationQueueLengthMetricName]; ok {
-        activationQueueLength, err := strconv.ParseInt(val, 10, 64)
-        if err != nil {
-            logger.Error(err, "Error parsing azure queue metadata", activationQueueLengthMetricName, activationQueueLengthMetricName)
-            return nil, kedav1alpha1.AuthPodIdentity{},
-                fmt.Errorf("error parsing azure queue metadata %s: %w", activationQueueLengthMetricName, err)
-        }
-
-        meta.activationTargetQueueLength = activationQueueLength
+    err := config.TypedConfig(&meta)
+    if err != nil {
+        return meta, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure queue metadata: %w", err)
     }

     endpointSuffix, err := azure.ParseAzureStorageEndpointSuffix(config.TriggerMetadata, azure.QueueEndpoint)
     if err != nil {
-        return nil, kedav1alpha1.AuthPodIdentity{}, err
-    }
-
-    meta.endpointSuffix = endpointSuffix
-
-    if val, ok := config.TriggerMetadata["queueName"]; ok && val != "" {
-        meta.queueName = val
-    } else {
-        return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no queueName given")
-    }
-
-    if val, ok := config.TriggerMetadata["queueLengthStrategy"]; ok && val != "" {
-        strategy := strings.ToLower(val)
-        if strategy == QueueLengthStrategyAll || strategy == QueueLengthStrategyVisibleOnly {
-            meta.queueLengthStrategy = strategy
-        } else {
-            return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("invalid queueLengthStrategy %s given", val)
-        }
-    } else {
-        meta.queueLengthStrategy = QueueLengthStrategyAll
+        return meta, kedav1alpha1.AuthPodIdentity{}, err
     }
+    meta.EndpointSuffix = endpointSuffix

     // If the Use AAD Pod Identity is not present, or set to "none"
     // then check for connection string
@@ -148,48 +102,39 @@ func parseAzureQueueMetadata(config *scalersconfig.ScalerConfig, logger logr.Log
     case "", kedav1alpha1.PodIdentityProviderNone:
         // Azure Queue Scaler expects a "connection" parameter in the metadata
         // of the scaler or in a TriggerAuthentication object
-        if config.AuthParams["connection"] != "" {
-            // Found the connection in a parameter from TriggerAuthentication
-            meta.connection = config.AuthParams["connection"]
-        } else if config.TriggerMetadata["connectionFromEnv"] != "" {
-            meta.connection = config.ResolvedEnv[config.TriggerMetadata["connectionFromEnv"]]
-        }
-
-        if len(meta.connection) == 0 {
-            return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no connection setting given")
+        if meta.Connection == "" {
+            return meta, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no connection setting given")
         }
     case kedav1alpha1.PodIdentityProviderAzureWorkload:
         // If the Use AAD Pod Identity is present then check account name
-        if val, ok := config.TriggerMetadata["accountName"]; ok && val != "" {
-            meta.accountName = val
-        } else {
-            return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no accountName given")
+        if meta.AccountName == "" {
+            return meta, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no accountName given")
         }
     default:
-        return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage queues", config.PodIdentity.Provider)
+        return meta, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage queues", config.PodIdentity.Provider)
     }

-    meta.triggerIndex = config.TriggerIndex
-
-    return &meta, config.PodIdentity, nil
+    meta.TriggerIndex = config.TriggerIndex
+    return meta, config.PodIdentity, nil
 }

 func (s *azureQueueScaler) Close(context.Context) error {
     return nil
 }

+// GetMetricSpecForScaling returns the metric specification used by the HPA
 func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
+    metricName := kedautil.NormalizeString(fmt.Sprintf("azure-queue-%s", s.metadata.QueueName))
     externalMetric := &v2.ExternalMetricSource{
         Metric: v2.MetricIdentifier{
-            Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-queue-%s", s.metadata.queueName))),
+            Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, metricName),
         },
-        Target: GetMetricTarget(s.metricType, s.metadata.targetQueueLength),
+        Target: GetMetricTarget(s.metricType, s.metadata.QueueLength),
     }
     metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType}
     return []v2.MetricSpec{metricSpec}
 }

-// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
     queuelen, err := s.getMessageCount(ctx)
     if err != nil {
@@ -198,12 +143,11 @@ func (s *azureQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName
     }

     metric := GenerateMetricInMili(metricName, float64(queuelen))
-    return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetQueueLength, nil
+    return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.ActivationQueueLength, nil
 }

 func (s *azureQueueScaler) getMessageCount(ctx context.Context) (int64, error) {
-    strategy := strings.ToLower(s.metadata.queueLengthStrategy)
-    if strategy == QueueLengthStrategyVisibleOnly {
+    if strings.ToLower(s.metadata.QueueLengthStrategy) == queueLengthStrategyVisibleOnly {
         queue, err := s.queueClient.PeekMessages(ctx, &azqueue.PeekMessagesOptions{NumberOfMessages: &maxPeekMessages})
         if err != nil {
             return 0, err
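Annotation: the refactor above replaces hand-rolled `strconv` parsing with KEDA's declarative `keda:"..."` struct tags consumed by `config.TypedConfig` (the Beanstalkd scaler below uses the same mechanism). A much-simplified standalone sketch of the idea — the tag grammar and reflection here are illustrative, not the actual `scalersconfig` implementation:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// Each struct field's `keda` tag names the trigger-metadata key and an
// optional default. The real TypedConfig also handles order=, optional,
// enum=, and more value kinds.
type queueMeta struct {
	QueueName   string `keda:"name=queueName"`
	QueueLength int64  `keda:"name=queueLength, default=5"`
}

func parse(meta map[string]string, out any) error {
	v := reflect.ValueOf(out).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		var name, def string
		for _, part := range strings.Split(t.Field(i).Tag.Get("keda"), ",") {
			part = strings.TrimSpace(part)
			if s, ok := strings.CutPrefix(part, "name="); ok {
				name = s
			}
			if s, ok := strings.CutPrefix(part, "default="); ok {
				def = s
			}
		}
		raw, ok := meta[name]
		if !ok {
			raw = def // fall back to the declared default
		}
		switch v.Field(i).Kind() {
		case reflect.String:
			v.Field(i).SetString(raw)
		case reflect.Int64:
			n, err := strconv.ParseInt(raw, 10, 64)
			if err != nil {
				return fmt.Errorf("parsing %s: %w", name, err)
			}
			v.Field(i).SetInt(n)
		}
	}
	return nil
}

func main() {
	var m queueMeta
	_ = parse(map[string]string{"queueName": "sample"}, &m)
	fmt.Printf("%+v\n", m) // {QueueName:sample QueueLength:5}
}
```

The payoff visible in this diff is that per-field validation (defaults, enums, required fields) moves out of every scaler's parse function and into one shared, declarative place.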
diff --git a/pkg/scalers/azure_queue_scaler_test.go b/pkg/scalers/azure_queue_scaler_test.go
index a36da33123c..e2e9ab73634 100644
--- a/pkg/scalers/azure_queue_scaler_test.go
+++ b/pkg/scalers/azure_queue_scaler_test.go
@@ -21,6 +21,8 @@ import (
     "testing"

     "github.com/go-logr/logr"
+    "github.com/stretchr/testify/assert"
+    v2 "k8s.io/api/autoscaling/v2"

     kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
     "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
@@ -31,6 +33,7 @@ var testAzQueueResolvedEnv = map[string]string{
 }

 type parseAzQueueMetadataTestData struct {
+    name        string
     metadata    map[string]string
     isError     bool
     resolvedEnv map[string]string
@@ -39,82 +42,281 @@ type parseAzQueueMetadataTestData struct {
 }

 type azQueueMetricIdentifier struct {
+    name             string
     metadataTestData *parseAzQueueMetadataTestData
     triggerIndex     int
-    name             string
+    metricName       string
 }

 var testAzQueueMetadata = []parseAzQueueMetadataTestData{
-    // nothing passed
-    {map[string]string{}, true, testAzQueueResolvedEnv, map[string]string{}, ""},
-    // properly formed
-    {map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "5"}, false, testAzQueueResolvedEnv, map[string]string{}, ""},
-    // Empty queueName
-    {map[string]string{"connectionFromEnv": "CONNECTION", "queueName": ""}, true, testAzQueueResolvedEnv, map[string]string{}, ""},
-    // improperly formed queueLength
-    {map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "AA"}, true, testAzQueueResolvedEnv, map[string]string{}, ""},
-    // improperly formed activationQueueLength
-    {map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "1", "activationQueueLength": "AA"}, true, testAzQueueResolvedEnv, map[string]string{}, ""},
-    // podIdentity = azure-workload with account name
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue"}, false, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload without account name
-    {map[string]string{"accountName": "", "queueName": "sample_queue"}, true, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload without queue name
-    {map[string]string{"accountName": "sample_acc", "queueName": ""}, true, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload with cloud
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "AzurePublicCloud"}, false, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload with invalid cloud
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "InvalidCloud"}, true, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload with private cloud and endpoint suffix
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "Private", "endpointSuffix": "queue.core.private.cloud"}, false, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload with private cloud and no endpoint suffix
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "Private", "endpointSuffix": ""}, true, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // podIdentity = azure-workload with endpoint suffix and no cloud
-    {map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "", "endpointSuffix": "ignored"}, false, testAzQueueResolvedEnv, map[string]string{}, kedav1alpha1.PodIdentityProviderAzureWorkload},
-    // connection from authParams
-    {map[string]string{"queueName": "sample", "queueLength": "5"}, false, testAzQueueResolvedEnv, map[string]string{"connection": "value"}, kedav1alpha1.PodIdentityProviderNone},
+    {
+        name:        "nothing passed",
+        metadata:    map[string]string{},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: "",
+    },
+    {
+        name:        "properly formed",
+        metadata:    map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "5"},
+        isError:     false,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: "",
+    },
+    {
+        name:        "empty queueName",
+        metadata:    map[string]string{"connectionFromEnv": "CONNECTION", "queueName": ""},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: "",
+    },
+    {
+        name:        "improperly formed queueLength",
+        metadata:    map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "AA"},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: "",
+    },
+    {
+        name:        "improperly formed activationQueueLength",
+        metadata:    map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "1", "activationQueueLength": "AA"},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: "",
+    },
+    {
+        name:        "podIdentity azure-workload with account name",
+        metadata:    map[string]string{"accountName": "sample_acc", "queueName": "sample_queue"},
+        isError:     false,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload,
+    },
+    {
+        name:        "podIdentity azure-workload without account name",
+        metadata:    map[string]string{"accountName": "", "queueName": "sample_queue"},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload,
+    },
+    {
+        name:        "podIdentity azure-workload without queue name",
+        metadata:    map[string]string{"accountName": "sample_acc", "queueName": ""},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload,
+    },
+    {
+        name:        "podIdentity azure-workload with cloud",
+        metadata:    map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "AzurePublicCloud"},
+        isError:     false,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload,
+    },
+    {
+        name:        "podIdentity azure-workload with invalid cloud",
+        metadata:    map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "InvalidCloud"},
+        isError:     true,
+        resolvedEnv: testAzQueueResolvedEnv,
+        authParams:  map[string]string{},
+        podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload,
+    },
+    {
+        name:        "podIdentity azure-workload with private cloud and endpoint suffix",
+        metadata:    map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "Private", "endpointSuffix": "queue.core.private.cloud"},
"endpointSuffix": "queue.core.private.cloud"}, + isError: false, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload, + }, + { + name: "podIdentity azure-workload with private cloud and no endpoint suffix", + metadata: map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "Private", "endpointSuffix": ""}, + isError: true, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload, + }, + { + name: "podIdentity azure-workload with endpoint suffix and no cloud", + metadata: map[string]string{"accountName": "sample_acc", "queueName": "sample_queue", "cloud": "", "endpointSuffix": "ignored"}, + isError: false, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: kedav1alpha1.PodIdentityProviderAzureWorkload, + }, + { + name: "connection from authParams", + metadata: map[string]string{"queueName": "sample", "queueLength": "5"}, + isError: false, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{"connection": "value"}, + podIdentity: kedav1alpha1.PodIdentityProviderNone, + }, + { + name: "valid queueLengthStrategy all", + metadata: map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "5", "queueLengthStrategy": "all"}, + isError: false, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: "", + }, + { + name: "valid queueLengthStrategy visibleonly", + metadata: map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "5", "queueLengthStrategy": "visibleonly"}, + isError: false, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: "", + }, + { + name: "invalid queueLengthStrategy", + metadata: map[string]string{"connectionFromEnv": "CONNECTION", "queueName": "sample", "queueLength": "5", "queueLengthStrategy": "invalid"}, + isError: true, + resolvedEnv: testAzQueueResolvedEnv, + authParams: map[string]string{}, + podIdentity: "", + }, } var azQueueMetricIdentifiers = []azQueueMetricIdentifier{ - {&testAzQueueMetadata[1], 0, "s0-azure-queue-sample"}, - {&testAzQueueMetadata[5], 1, "s1-azure-queue-sample_queue"}, + { + name: "properly formed queue metric", + metadataTestData: &testAzQueueMetadata[1], + triggerIndex: 0, + metricName: "s0-azure-queue-sample", + }, + { + name: "azure-workload queue metric", + metadataTestData: &testAzQueueMetadata[5], + triggerIndex: 1, + metricName: "s1-azure-queue-sample_queue", + }, +} + +type mockAzureQueueClient struct { + peekMessageCount int + totalMessages int32 +} + +func (m *mockAzureQueueClient) getMessageCount(visibleOnly bool) int64 { + if visibleOnly { + if m.peekMessageCount >= 32 { + return int64(m.totalMessages) + } + return int64(m.peekMessageCount) + } + return int64(m.totalMessages) } func TestAzQueueParseMetadata(t *testing.T) { for _, testData := range testAzQueueMetadata { - _, podIdentity, err := parseAzureQueueMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, - ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}, - logr.Discard()) - if err != nil && !testData.isError { - t.Error("Expected success but got error", err) - } - if testData.isError && err == nil { - t.Errorf("Expected error but got success. 
testData: %v", testData) - } - if testData.podIdentity != "" && testData.podIdentity != podIdentity.Provider && err == nil { - t.Error("Expected success but got error: podIdentity value is not returned as expected") - } + t.Run(testData.name, func(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadata, + ResolvedEnv: testData.resolvedEnv, + AuthParams: testData.authParams, + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}, + } + + _, podIdentity, err := parseAzureQueueMetadata(config) + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Errorf("Expected error but got success. testData: %v", testData) + } + if testData.podIdentity != "" && testData.podIdentity != podIdentity.Provider && err == nil { + t.Error("Expected success but got error: podIdentity value is not returned as expected") + } + }) } } func TestAzQueueGetMetricSpecForScaling(t *testing.T) { for _, testData := range azQueueMetricIdentifiers { - meta, _, err := parseAzureQueueMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, - ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex}, - logr.Discard()) - if err != nil { - t.Fatal("Could not parse metadata:", err) - } - mockAzQueueScaler := azureQueueScaler{ - metadata: meta, - } + t.Run(testData.name, func(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadataTestData.metadata, + ResolvedEnv: testData.metadataTestData.resolvedEnv, + AuthParams: testData.metadataTestData.authParams, + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, + TriggerIndex: testData.triggerIndex, + } - metricSpec := mockAzQueueScaler.GetMetricSpecForScaling(context.Background()) - metricName := metricSpec[0].External.Metric.Name - if metricName != testData.name { - t.Error("Wrong External metric source name:", metricName) - } + meta, _, err := parseAzureQueueMetadata(config) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + + mockAzQueueScaler := azureQueueScaler{ + metadata: meta, + logger: logr.Discard(), + metricType: v2.AverageValueMetricType, + } + + metricSpec := mockAzQueueScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + assert.Equal(t, testData.metricName, metricName) + }) + } +} + +func TestAzQueueGetMessageCount(t *testing.T) { + testCases := []struct { + name string + strategy string + peekMessages int + totalMessages int32 + expectedCount int64 + }{ + { + name: "default strategy (all)", + strategy: "", + peekMessages: 10, + totalMessages: 100, + expectedCount: 100, + }, + { + name: "explicit all strategy", + strategy: "all", + peekMessages: 10, + totalMessages: 100, + expectedCount: 100, + }, + { + name: "visibleonly strategy with less than 32 messages", + strategy: "visibleonly", + peekMessages: 10, + totalMessages: 100, + expectedCount: 10, + }, + { + name: "visibleonly strategy with 32 or more messages", + strategy: "visibleonly", + peekMessages: 35, + totalMessages: 100, + expectedCount: 100, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &mockAzureQueueClient{ + peekMessageCount: tc.peekMessages, + totalMessages: 
tc.totalMessages, + } + + count := mockClient.getMessageCount(tc.strategy == "visibleonly") + assert.Equal(t, tc.expectedCount, count) + }) } } diff --git a/pkg/scalers/beanstalkd_scaler.go b/pkg/scalers/beanstalkd_scaler.go new file mode 100644 index 00000000000..a658b4f104f --- /dev/null +++ b/pkg/scalers/beanstalkd_scaler.go @@ -0,0 +1,179 @@ +package scalers + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + beanstalk "github.com/beanstalkd/go-beanstalk" + "github.com/go-logr/logr" + "github.com/mitchellh/mapstructure" + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/metrics/pkg/apis/external_metrics" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" + "github.com/kedacore/keda/v2/pkg/util" +) + +const ( + beanstalkdJobsMetricName = "jobs" + beanstalkdValueConfigName = "value" + beanstalkdActivationValueTriggerConfigName = "activationValue" + beanstalkdMetricType = "External" + beanstalkdNetworkProtocol = "tcp" +) + +type BeanstalkdScaler struct { + metricType v2.MetricTargetType + metadata *BeanstalkdMetadata + connection *beanstalk.Conn + tube *beanstalk.Tube + logger logr.Logger +} + +type BeanstalkdMetadata struct { + Server string `keda:"name=server, order=triggerMetadata"` + Tube string `keda:"name=tube, order=triggerMetadata"` + Value float64 `keda:"name=value, order=triggerMetadata"` + ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, optional"` + IncludeDelayed bool `keda:"name=includeDelayed, order=triggerMetadata, optional"` + Timeout uint `keda:"name=timeout, order=triggerMetadata, optional, default=30"` + TriggerIndex int +} + +// TubeStats represents a set of tube statistics. +type tubeStats struct { + TotalJobs int64 `mapstructure:"total-jobs"` + JobsReady int64 `mapstructure:"current-jobs-ready"` + JobsReserved int64 `mapstructure:"current-jobs-reserved"` + JobsUrgent int64 `mapstructure:"current-jobs-urgent"` + JobsBuried int64 `mapstructure:"current-jobs-buried"` + JobsDelayed int64 `mapstructure:"current-jobs-delayed"` +} + +func NewBeanstalkdScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { + s := &BeanstalkdScaler{} + + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + s.metricType = metricType + + s.logger = InitializeLogger(config, "beanstalkd_scaler") + + meta, err := parseBeanstalkdMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing beanstalkd metadata: %w", err) + } + s.metadata = meta + + timeout := time.Duration(s.metadata.Timeout) * time.Second + + conn, err := beanstalk.DialTimeout(beanstalkdNetworkProtocol, s.metadata.Server, timeout) + if err != nil { + return nil, fmt.Errorf("error connecting to beanstalkd: %w", err) + } + + s.connection = conn + + s.tube = beanstalk.NewTube(s.connection, meta.Tube) + + return s, nil +} + +func parseBeanstalkdMetadata(config *scalersconfig.ScalerConfig) (*BeanstalkdMetadata, error) { + meta := &BeanstalkdMetadata{} + + meta.TriggerIndex = config.TriggerIndex + if err := config.TypedConfig(meta); err != nil { + return nil, fmt.Errorf("error parsing beanstalkd metadata: %w", err) + } + + return meta, nil +} + +func (s *BeanstalkdScaler) getTubeStats(ctx context.Context) (*tubeStats, error) { + errCh := make(chan error) + statsCh := make(chan *tubeStats) + + go func() { + rawStats, err := s.tube.Stats() + if err != nil { + errCh <- fmt.Errorf("error retrieving stats from beanstalkd: %w", err) + } + + var stats tubeStats + err = 
mapstructure.WeakDecode(rawStats, &stats) + if err != nil { + errCh <- fmt.Errorf("error decoding stats from beanstalkd: %w", err) + } + + statsCh <- &stats + }() + + select { + case err := <-errCh: + if errors.Is(err, beanstalk.ErrNotFound) { + s.logger.Info("tube not found, setting stats to 0") + return &tubeStats{ + TotalJobs: 0, + JobsReady: 0, + JobsDelayed: 0, + JobsReserved: 0, + JobsUrgent: 0, + JobsBuried: 0, + }, nil + } + return nil, err + case tubeStats := <-statsCh: + return tubeStats, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (s *BeanstalkdScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + stats, err := s.getTubeStats(ctx) + if err != nil { + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error interacting with beanstalkd: %w", err) + } + + totalJobs := stats.JobsReady + stats.JobsReserved + + if s.metadata.IncludeDelayed { + totalJobs += stats.JobsDelayed + } + + metric := GenerateMetricInMili(metricName, float64(totalJobs)) + isActive := float64(totalJobs) > s.metadata.ActivationValue + + return []external_metrics.ExternalMetricValue{metric}, isActive, nil +} + +func (s *BeanstalkdScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, util.NormalizeString(fmt.Sprintf("beanstalkd-%s", url.QueryEscape(s.metadata.Tube)))), + }, + Target: GetMetricTargetMili(s.metricType, s.metadata.Value), + } + metricSpec := v2.MetricSpec{ + External: externalMetric, Type: beanstalkdMetricType, + } + + return []v2.MetricSpec{metricSpec} +} + +func (s *BeanstalkdScaler) Close(context.Context) error { + if s.connection != nil { + err := s.connection.Close() + if err != nil { + s.logger.Error(err, "Error closing beanstalkd connection") + return err + } + } + return nil +} diff --git a/pkg/scalers/beanstalkd_scaler_test.go b/pkg/scalers/beanstalkd_scaler_test.go new file mode 100644 index 00000000000..11f77176b5c --- /dev/null +++ b/pkg/scalers/beanstalkd_scaler_test.go @@ -0,0 +1,257 @@ +package scalers + +import ( + "context" + "fmt" + "net" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" +) + +const ( + beanstalkdServer = "localhost:3000" +) + +type parseBeanstalkdMetadataTestData struct { + metadata map[string]string + isError bool +} + +type beanstalkdMetricIdentifier struct { + metadataTestData *parseBeanstalkdMetadataTestData + index int + name string +} + +type tubeStatsTestData struct { + response map[string]interface{} + metadata map[string]string + isActive bool +} + +var testBeanstalkdMetadata = []parseBeanstalkdMetadataTestData{ + // nothing passed + {map[string]string{}, true}, + // properly formed + {map[string]string{"server": beanstalkdServer, "tube": "delayed", "value": "1", "includeDelayed": "true"}, false}, + // no includeDelayed + {map[string]string{"server": beanstalkdServer, "tube": "no-delayed", "value": "1"}, false}, + // missing server + {map[string]string{"tube": "stats-tube", "value": "1", "includeDelayed": "true"}, true}, + // missing tube + {map[string]string{"server": beanstalkdServer, "value": "1", "includeDelayed": "true"}, true}, + // missing value + {map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "includeDelayed": "true"}, true}, + // invalid value + 
{map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "value": "lots", "includeDelayed": "true"}, true}, + // valid timeout + {map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "value": "1", "includeDelayed": "true", "timeout": "1000"}, false}, + // invalid timeout + {map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "value": "1", "includeDelayed": "true", "timeout": "-1"}, true}, + // activationValue passed + {map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "value": "1", "activationValue": "10"}, false}, + // invalid activationValue passed + {map[string]string{"server": beanstalkdServer, "tube": "stats-tube", "value": "1", "activationValue": "AA"}, true}, +} + +var beanstalkdMetricIdentifiers = []beanstalkdMetricIdentifier{ + {&testBeanstalkdMetadata[2], 0, "s0-beanstalkd-no-delayed"}, + {&testBeanstalkdMetadata[1], 1, "s1-beanstalkd-delayed"}, +} + +var testTubeStatsTestData = []tubeStatsTestData{ + { + response: map[string]interface{}{ + "cmd-delete": 18, + "cmd-pause-tube": 0, + "current-jobs-buried": 6, + "current-jobs-delayed": 0, + "current-jobs-ready": 10, + "current-jobs-reserved": 0, + "current-jobs-urgent": 0, + "current-using": 3, + "current-waiting": 3, + "current-watching": 3, + "name": "form-crawler-notifications", + "pause": 0, + "pause-time-left": 0, + "total-jobs": 24, + }, + metadata: map[string]string{"server": beanstalkdServer, "tube": "no-delayed", "value": "2"}, + isActive: true, + }, + { + response: map[string]interface{}{ + "cmd-delete": 18, + "cmd-pause-tube": 0, + "current-jobs-buried": 0, + "current-jobs-delayed": 0, + "current-jobs-ready": 1, + "current-jobs-reserved": 0, + "current-jobs-urgent": 0, + "current-using": 3, + "current-waiting": 3, + "current-watching": 3, + "name": "form-crawler-notifications", + "pause": 0, + "pause-time-left": 0, + "total-jobs": 24, + }, + metadata: map[string]string{"server": beanstalkdServer, "tube": "no-delayed", "value": "3", "activationValue": "2"}, + isActive: false, + }, + { + response: map[string]interface{}{ + "cmd-delete": 18, + "cmd-pause-tube": 0, + "current-jobs-buried": 0, + "current-jobs-delayed": 10, + "current-jobs-ready": 0, + "current-jobs-reserved": 0, + "current-jobs-urgent": 0, + "current-using": 3, + "current-waiting": 3, + "current-watching": 3, + "name": "form-crawler-notifications", + "pause": 0, + "pause-time-left": 0, + "total-jobs": 24, + }, + metadata: map[string]string{"server": beanstalkdServer, "tube": "no-delayed", "value": "2"}, + isActive: false, + }, + { + response: map[string]interface{}{ + "cmd-delete": 18, + "cmd-pause-tube": 0, + "current-jobs-buried": 0, + "current-jobs-delayed": 10, + "current-jobs-ready": 0, + "current-jobs-reserved": 0, + "current-jobs-urgent": 0, + "current-using": 3, + "current-waiting": 3, + "current-watching": 3, + "name": "form-crawler-notifications", + "pause": 0, + "pause-time-left": 0, + "total-jobs": 24, + }, + metadata: map[string]string{"server": beanstalkdServer, "tube": "no-delayed", "value": "2", "includeDelayed": "true"}, + isActive: true, + }, +} + +func TestBeanstalkdParseMetadata(t *testing.T) { + for idx, testData := range testBeanstalkdMetadata { + meta, err := parseBeanstalkdMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata}) + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Errorf("Expected error but got success in test case %d", idx) + } + if err == nil { + if val, ok := 
testData.metadata["includeDelayed"]; !ok { + assert.Equal(t, false, meta.IncludeDelayed) + } else { + boolVal, err := strconv.ParseBool(val) + if err != nil { + assert.Equal(t, boolVal, meta.IncludeDelayed) + } + } + } + } +} + +func TestBeanstalkdGetMetricSpecForScaling(t *testing.T) { + for _, testData := range beanstalkdMetricIdentifiers { + meta, err := parseBeanstalkdMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil, TriggerIndex: testData.index}) + if err != nil { + t.Fatal("could not parse metadata", err) + } + mockBeanstalkdScaler := BeanstalkdScaler{ + metadata: meta, + connection: nil, + tube: nil, + } + + metricSpec := mockBeanstalkdScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + assert.Equal(t, testData.name, metricName, "correct external source name") + } +} + +func TestGetTubeStats(t *testing.T) { + for _, testData := range testTubeStatsTestData { + testData := testData + yamlData, err := yaml.Marshal(testData.response) + if err != nil { + t.Fatal(err) + } + + response := []byte(fmt.Sprintf("OK %d\r\n", len(yamlData))) + response = append(response, yamlData...) + response = append(response, []byte("\r\n")...) + createTestServer(t, response) + + s, err := NewBeanstalkdScaler( + &scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadata, + GlobalHTTPTimeout: 1000 * time.Millisecond, + }, + ) + + assert.NoError(t, err) + + ctx := context.Background() + _, active, err := s.GetMetricsAndActivity(ctx, "Metric") + + assert.NoError(t, err) + + assert.Equal(t, testData.isActive, active) + } +} + +func TestGetTubeStatsNotFound(t *testing.T) { + testData := testTubeStatsTestData[0] + createTestServer(t, []byte("NOT_FOUND\r\n")) + s, err := NewBeanstalkdScaler( + &scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadata, + GlobalHTTPTimeout: 1000 * time.Millisecond, + }, + ) + + assert.NoError(t, err) + + ctx := context.Background() + _, active, err := s.GetMetricsAndActivity(ctx, "Metric") + + assert.NoError(t, err) + assert.False(t, active) +} + +func createTestServer(t *testing.T, response []byte) { + list, err := net.Listen("tcp", "localhost:3000") + if err != nil { + t.Fatal(err) + } + go func() { + defer list.Close() + conn, err := list.Accept() + if err != nil { + return + } + + _, err = conn.Write(response) + assert.NoError(t, err) + conn.Close() + }() +} diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go index 6e8705d2d8d..b41dddb9dec 100644 --- a/pkg/scalers/cassandra_scaler.go +++ b/pkg/scalers/cassandra_scaler.go @@ -18,53 +18,70 @@ import ( kedautil "github.com/kedacore/keda/v2/pkg/util" ) -// cassandraScaler exposes a data pointer to CassandraMetadata and gocql.Session connection. type cassandraScaler struct { metricType v2.MetricTargetType - metadata *CassandraMetadata + metadata cassandraMetadata session *gocql.Session logger logr.Logger } -// CassandraMetadata defines metadata used by KEDA to query a Cassandra table. 
-type CassandraMetadata struct { - username string - password string - enableTLS bool - cert string - key string - ca string - clusterIPAddress string - port int - consistency gocql.Consistency - protocolVersion int - keyspace string - query string - targetQueryValue int64 - activationTargetQueryValue int64 - triggerIndex int +type cassandraMetadata struct { + Username string `keda:"name=username, order=triggerMetadata"` + Password string `keda:"name=password, order=authParams"` + TLS string `keda:"name=tls, order=authParams, enum=enable;disable, default=disable, optional"` + Cert string `keda:"name=cert, order=authParams, optional"` + Key string `keda:"name=key, order=authParams, optional"` + CA string `keda:"name=ca, order=authParams, optional"` + ClusterIPAddress string `keda:"name=clusterIPAddress, order=triggerMetadata"` + Port int `keda:"name=port, order=triggerMetadata, optional"` + Consistency string `keda:"name=consistency, order=triggerMetadata, default=one"` + ProtocolVersion int `keda:"name=protocolVersion, order=triggerMetadata, default=4"` + Keyspace string `keda:"name=keyspace, order=triggerMetadata"` + Query string `keda:"name=query, order=triggerMetadata"` + TargetQueryValue int64 `keda:"name=targetQueryValue, order=triggerMetadata"` + ActivationTargetQueryValue int64 `keda:"name=activationTargetQueryValue, order=triggerMetadata, default=0"` + TriggerIndex int } const ( - tlsEnable = "enable" - tlsDisable = "disable" + tlsEnable = "enable" ) -// NewCassandraScaler creates a new Cassandra scaler. +func (m *cassandraMetadata) Validate() error { + if m.TLS == tlsEnable && (m.Cert == "" || m.Key == "") { + return errors.New("both cert and key are required when TLS is enabled") + } + + // Handle port in ClusterIPAddress + splitVal := strings.Split(m.ClusterIPAddress, ":") + if len(splitVal) == 2 { + if port, err := strconv.Atoi(splitVal[1]); err == nil { + m.Port = port + return nil + } + } + + if m.Port == 0 { + return fmt.Errorf("no port given") + } + + m.ClusterIPAddress = net.JoinHostPort(m.ClusterIPAddress, fmt.Sprintf("%d", m.Port)) + return nil +} + +// NewCassandraScaler creates a new Cassandra scaler func NewCassandraScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - logger := InitializeLogger(config, "cassandra_scaler") - meta, err := parseCassandraMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing cassandra metadata: %w", err) } - session, err := newCassandraSession(meta, logger) + session, err := newCassandraSession(meta, InitializeLogger(config, "cassandra_scaler")) if err != nil { return nil, fmt.Errorf("error establishing cassandra session: %w", err) } @@ -73,108 +90,27 @@ func NewCassandraScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, session: session, - logger: logger, + logger: InitializeLogger(config, "cassandra_scaler"), }, nil } -// parseCassandraMetadata parses the metadata and returns a CassandraMetadata or an error if the ScalerConfig is invalid. 
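Reviewer note: the hand-rolled parser removed below is exactly what the declarative `keda` struct tags above replace. As a minimal sketch of the pattern (not part of this PR; the struct, field names, and the demo validation rule are ours), `TypedConfig` decodes trigger metadata and auth params according to the tags and, as the new parsers in this PR imply, runs an optional `Validate() error` hook afterwards:

```go
package main

import (
	"fmt"

	"github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
)

// demoMetadata is a toy struct; the tag syntax mirrors the new cassandraMetadata.
type demoMetadata struct {
	Keyspace string `keda:"name=keyspace, order=triggerMetadata"`
	Port     int    `keda:"name=port, order=triggerMetadata, optional"`
	TLS      string `keda:"name=tls, order=authParams, enum=enable;disable, default=disable, optional"`
}

// Validate is the cross-field hook the new parsers rely on
// (the rule below is illustrative only).
func (m *demoMetadata) Validate() error {
	if m.TLS == "enable" && m.Port == 0 {
		return fmt.Errorf("port is required when tls is enabled")
	}
	return nil
}

func main() {
	cfg := &scalersconfig.ScalerConfig{
		TriggerMetadata: map[string]string{"keyspace": "test_keyspace", "port": "9042"},
		AuthParams:      map[string]string{},
	}
	meta := demoMetadata{}
	if err := cfg.TypedConfig(&meta); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%+v\n", meta) // {Keyspace:test_keyspace Port:9042 TLS:disable}
}
```

The payoff is visible in the hunks that follow: missing-field errors, integer parsing, defaults, and the enable/disable enum check all move out of parseCassandraMetadata and into the tags.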
-func parseCassandraMetadata(config *scalersconfig.ScalerConfig) (*CassandraMetadata, error) { - meta := &CassandraMetadata{} - var err error - - if val, ok := config.TriggerMetadata["query"]; ok { - meta.query = val - } else { - return nil, fmt.Errorf("no query given") - } - - if val, ok := config.TriggerMetadata["targetQueryValue"]; ok { - targetQueryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("targetQueryValue parsing error %w", err) - } - meta.targetQueryValue = targetQueryValue - } else { - if config.AsMetricSource { - meta.targetQueryValue = 0 - } else { - return nil, fmt.Errorf("no targetQueryValue given") - } - } - - meta.activationTargetQueryValue = 0 - if val, ok := config.TriggerMetadata["activationTargetQueryValue"]; ok { - activationTargetQueryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("activationTargetQueryValue parsing error %w", err) - } - meta.activationTargetQueryValue = activationTargetQueryValue - } - - if val, ok := config.TriggerMetadata["username"]; ok { - meta.username = val - } else { - return nil, fmt.Errorf("no username given") - } - - if val, ok := config.TriggerMetadata["port"]; ok { - port, err := strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("port parsing error %w", err) - } - meta.port = port - } - - if val, ok := config.TriggerMetadata["clusterIPAddress"]; ok { - splitval := strings.Split(val, ":") - port := splitval[len(splitval)-1] - - _, err := strconv.Atoi(port) - switch { - case err == nil: - meta.clusterIPAddress = val - case meta.port > 0: - meta.clusterIPAddress = net.JoinHostPort(val, fmt.Sprintf("%d", meta.port)) - default: - return nil, fmt.Errorf("no port given") - } - } else { - return nil, fmt.Errorf("no cluster IP address given") - } - - if val, ok := config.TriggerMetadata["protocolVersion"]; ok { - protocolVersion, err := strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("protocolVersion parsing error %w", err) - } - meta.protocolVersion = protocolVersion - } else { - meta.protocolVersion = 4 - } - - if val, ok := config.TriggerMetadata["consistency"]; ok { - meta.consistency = gocql.ParseConsistency(val) - } else { - meta.consistency = gocql.One +func parseCassandraMetadata(config *scalersconfig.ScalerConfig) (cassandraMetadata, error) { + meta := cassandraMetadata{} + err := config.TypedConfig(&meta) + if err != nil { + return meta, fmt.Errorf("error parsing cassandra metadata: %w", err) } - if val, ok := config.TriggerMetadata["keyspace"]; ok { - meta.keyspace = val - } else { - return nil, fmt.Errorf("no keyspace given") - } - if val, ok := config.AuthParams["password"]; ok { - meta.password = val - } else { - return nil, fmt.Errorf("no password given") + if config.AsMetricSource { + meta.TargetQueryValue = 0 } - if err = parseCassandraTLS(config, meta); err != nil { + err = parseCassandraTLS(&meta) + if err != nil { return meta, err } - meta.triggerIndex = config.TriggerIndex - + meta.TriggerIndex = config.TriggerIndex return meta, nil } @@ -182,8 +118,8 @@ func createTempFile(prefix string, content string) (string, error) { tempCassandraDir := fmt.Sprintf("%s%c%s", os.TempDir(), os.PathSeparator, "cassandra") err := os.MkdirAll(tempCassandraDir, 0700) if err != nil { - return "", fmt.Errorf(`error creating temporary directory: %s. Error: %w - Note, when running in a container a writable /tmp/cassandra emptyDir must be mounted. 
Refer to documentation`, tempCassandraDir, err) + return "", fmt.Errorf(`error creating temporary directory: %s. Error: %w + Note, when running in a container a writable /tmp/cassandra emptyDir must be mounted. Refer to documentation`, tempCassandraDir, err) } f, err := os.CreateTemp(tempCassandraDir, prefix+"-*.pem") @@ -200,72 +136,48 @@ func createTempFile(prefix string, content string) (string, error) { return f.Name(), nil } -func parseCassandraTLS(config *scalersconfig.ScalerConfig, meta *CassandraMetadata) error { - meta.enableTLS = false - if val, ok := config.AuthParams["tls"]; ok { - val = strings.TrimSpace(val) - if val == tlsEnable { - certGiven := config.AuthParams["cert"] != "" - keyGiven := config.AuthParams["key"] != "" - caCertGiven := config.AuthParams["ca"] != "" - if certGiven && !keyGiven { - return errors.New("no key given") - } - if keyGiven && !certGiven { - return errors.New("no cert given") - } - if !keyGiven && !certGiven { - return errors.New("no cert/key given") - } +func parseCassandraTLS(meta *cassandraMetadata) error { + if meta.TLS == tlsEnable { + // Create temp files for certs + certFilePath, err := createTempFile("cert", meta.Cert) + if err != nil { + return fmt.Errorf("error creating cert file: %w", err) + } + meta.Cert = certFilePath - certFilePath, err := createTempFile("cert", config.AuthParams["cert"]) - if err != nil { - // handle error - return errors.New("Error creating cert file: " + err.Error()) - } + keyFilePath, err := createTempFile("key", meta.Key) + if err != nil { + return fmt.Errorf("error creating key file: %w", err) + } + meta.Key = keyFilePath - keyFilePath, err := createTempFile("key", config.AuthParams["key"]) + // If CA cert is given, make also file + if meta.CA != "" { + caCertFilePath, err := createTempFile("caCert", meta.CA) if err != nil { - // handle error - return errors.New("Error creating key file: " + err.Error()) + return fmt.Errorf("error creating ca file: %w", err) } - - meta.cert = certFilePath - meta.key = keyFilePath - meta.ca = config.AuthParams["ca"] - if !caCertGiven { - meta.ca = "" - } else { - caCertFilePath, err := createTempFile("caCert", config.AuthParams["ca"]) - meta.ca = caCertFilePath - if err != nil { - // handle error - return errors.New("Error creating ca file: " + err.Error()) - } - } - meta.enableTLS = true - } else if val != tlsDisable { - return fmt.Errorf("err incorrect value for TLS given: %s", val) + meta.CA = caCertFilePath } } return nil } -// newCassandraSession returns a new Cassandra session for the provided CassandraMetadata. 
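Returning to the Validate method added earlier in this file: its clusterIPAddress handling is easy to misread, since it both accepts an address that already embeds a numeric port and falls back to the separate port field. A standalone illustration of the three cases the tests exercise (the `normalize` helper is ours, not in the PR):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// normalize mirrors the port logic in cassandraMetadata.Validate: keep an
// address that already ends in a numeric port, otherwise join the separate
// port field, and fail if neither supplies a port.
func normalize(address string, port int) (string, error) {
	parts := strings.Split(address, ":")
	if len(parts) == 2 {
		if _, err := strconv.Atoi(parts[1]); err == nil {
			return address, nil
		}
	}
	if port == 0 {
		return "", fmt.Errorf("no port given")
	}
	return net.JoinHostPort(address, fmt.Sprintf("%d", port)), nil
}

func main() {
	a, _ := normalize("cassandra.test:9042", 0) // port embedded in the address
	fmt.Println(a)                              // cassandra.test:9042
	b, _ := normalize("cassandra.test", 9042)   // port supplied separately
	fmt.Println(b)                              // cassandra.test:9042
	_, err := normalize("cassandra.test", 0)    // neither -> error
	fmt.Println(err)                            // no port given
}
```

This matches the updated test table later in the diff, where a bare clusterIPAddress with no port is expected to fail while an address plus an explicit port field succeeds.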
-func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Session, error) { - cluster := gocql.NewCluster(meta.clusterIPAddress) - cluster.ProtoVersion = meta.protocolVersion - cluster.Consistency = meta.consistency +// newCassandraSession returns a new Cassandra session for the provided CassandraMetadata +func newCassandraSession(meta cassandraMetadata, logger logr.Logger) (*gocql.Session, error) { + cluster := gocql.NewCluster(meta.ClusterIPAddress) + cluster.ProtoVersion = meta.ProtocolVersion + cluster.Consistency = gocql.ParseConsistency(meta.Consistency) cluster.Authenticator = gocql.PasswordAuthenticator{ - Username: meta.username, - Password: meta.password, + Username: meta.Username, + Password: meta.Password, } - if meta.enableTLS { + if meta.TLS == tlsEnable { cluster.SslOpts = &gocql.SslOptions{ - CertPath: meta.cert, - KeyPath: meta.key, - CaPath: meta.ca, + CertPath: meta.Cert, + KeyPath: meta.Key, + CaPath: meta.CA, } } @@ -278,22 +190,19 @@ func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Se return session, nil } -// GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler. +// GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler func (s *cassandraScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", s.metadata.keyspace))), + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", s.metadata.Keyspace))), }, - Target: GetMetricTarget(s.metricType, s.metadata.targetQueryValue), - } - metricSpec := v2.MetricSpec{ - External: externalMetric, Type: externalMetricType, + Target: GetMetricTarget(s.metricType, s.metadata.TargetQueryValue), } - + metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} } -// GetMetricsAndActivity returns a value for a supported metric or an error if there is a problem getting the metric. +// GetMetricsAndActivity returns a value for a supported metric or an error if there is a problem getting the metric func (s *cassandraScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.GetQueryResult(ctx) if err != nil { @@ -301,38 +210,36 @@ func (s *cassandraScaler) GetMetricsAndActivity(ctx context.Context, metricName } metric := GenerateMetricInMili(metricName, float64(num)) - - return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetQueryValue, nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.ActivationTargetQueryValue, nil } -// GetQueryResult returns the result of the scaler query. +// GetQueryResult returns the result of the scaler query func (s *cassandraScaler) GetQueryResult(ctx context.Context) (int64, error) { var value int64 - if err := s.session.Query(s.metadata.query).WithContext(ctx).Scan(&value); err != nil { + if err := s.session.Query(s.metadata.Query).WithContext(ctx).Scan(&value); err != nil { if err != gocql.ErrNotFound { s.logger.Error(err, "query failed") return 0, err } } - return value, nil } -// Close closes the Cassandra session connection. 
+// Close closes the Cassandra session connection func (s *cassandraScaler) Close(_ context.Context) error { // clean up any temporary files - if strings.TrimSpace(s.metadata.cert) != "" { - if err := os.Remove(s.metadata.cert); err != nil { + if s.metadata.Cert != "" { + if err := os.Remove(s.metadata.Cert); err != nil { return err } } - if strings.TrimSpace(s.metadata.key) != "" { - if err := os.Remove(s.metadata.key); err != nil { + if s.metadata.Key != "" { + if err := os.Remove(s.metadata.Key); err != nil { return err } } - if strings.TrimSpace(s.metadata.ca) != "" { - if err := os.Remove(s.metadata.ca); err != nil { + if s.metadata.CA != "" { + if err := os.Remove(s.metadata.CA); err != nil { return err } } diff --git a/pkg/scalers/cassandra_scaler_test.go b/pkg/scalers/cassandra_scaler_test.go index 39930946a56..d2e892b8c32 100644 --- a/pkg/scalers/cassandra_scaler_test.go +++ b/pkg/scalers/cassandra_scaler_test.go @@ -2,156 +2,318 @@ package scalers import ( "context" - "fmt" "os" "testing" "github.com/go-logr/logr" "github.com/gocql/gocql" + "github.com/stretchr/testify/assert" + v2 "k8s.io/api/autoscaling/v2" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) type parseCassandraMetadataTestData struct { + name string metadata map[string]string - isError bool authParams map[string]string + isError bool } type parseCassandraTLSTestData struct { + name string authParams map[string]string isError bool - enableTLS bool + tlsEnabled bool } type cassandraMetricIdentifier struct { + name string metadataTestData *parseCassandraMetadataTestData triggerIndex int - name string + metricName string } var testCassandraMetadata = []parseCassandraMetadataTestData{ - // nothing passed - {map[string]string{}, true, map[string]string{}}, - // everything is passed in verbatim - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // metricName is generated from keyspace - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no query passed - {map[string]string{"targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no targetQueryValue passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no username passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no port passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no 
clusterIPAddress passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no keyspace passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "TriggerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, - // no password passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"}, true, map[string]string{}}, - // fix issue[4110] passed - {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "https://cassandra.test", "keyspace": "test_keyspace", "TriggerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}}, + { + name: "nothing passed", + metadata: map[string]string{}, + authParams: map[string]string{}, + isError: true, + }, + { + name: "everything passed verbatim", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "port": "9042", + "clusterIPAddress": "cassandra.test", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: false, + }, + { + name: "metricName from keyspace", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: false, + }, + { + name: "no query", + metadata: map[string]string{ + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: true, + }, + { + name: "no targetQueryValue", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: true, + }, + { + name: "no username", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: true, + }, + { + name: "no port", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: true, + }, + { + name: "no clusterIPAddress", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "port": "9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + 
isError: true, + }, + { + name: "no keyspace", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: true, + }, + { + name: "no password", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{}, + isError: true, + }, + { + name: "with https prefix", + metadata: map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "port": "9042", + "clusterIPAddress": "https://cassandra.test", + "keyspace": "test_keyspace", + }, + authParams: map[string]string{"password": "Y2Fzc2FuZHJhCg=="}, + isError: false, + }, } var tlsAuthParamsTestData = []parseCassandraTLSTestData{ - // success, TLS cert/key - {map[string]string{"tls": "enable", "cert": "ceert", "key": "keey", "password": "Y2Fzc2FuZHJhCg=="}, false, true}, - // failure, TLS missing cert - {map[string]string{"tls": "enable", "key": "keey", "password": "Y2Fzc2FuZHJhCg=="}, true, false}, - // failure, TLS missing key - {map[string]string{"tls": "enable", "cert": "ceert", "password": "Y2Fzc2FuZHJhCg=="}, true, false}, - // failure, TLS invalid - {map[string]string{"tls": "yes", "cert": "ceert", "key": "keeey", "password": "Y2Fzc2FuZHJhCg=="}, true, false}, + { + name: "success with cert/key", + authParams: map[string]string{ + "tls": "enable", + "cert": "test-cert", + "key": "test-key", + "password": "Y2Fzc2FuZHJhCg==", + }, + isError: false, + tlsEnabled: true, + }, + { + name: "failure missing cert", + authParams: map[string]string{ + "tls": "enable", + "key": "test-key", + "password": "Y2Fzc2FuZHJhCg==", + }, + isError: true, + tlsEnabled: false, + }, + { + name: "failure missing key", + authParams: map[string]string{ + "tls": "enable", + "cert": "test-cert", + "password": "Y2Fzc2FuZHJhCg==", + }, + isError: true, + tlsEnabled: false, + }, + { + name: "failure invalid tls value", + authParams: map[string]string{ + "tls": "yes", + "cert": "test-cert", + "key": "test-key", + "password": "Y2Fzc2FuZHJhCg==", + }, + isError: true, + tlsEnabled: false, + }, } var cassandraMetricIdentifiers = []cassandraMetricIdentifier{ - {&testCassandraMetadata[1], 0, "s0-cassandra-test_keyspace"}, - {&testCassandraMetadata[2], 1, "s1-cassandra-test_keyspace"}, + { + name: "everything passed verbatim", + metadataTestData: &testCassandraMetadata[1], + triggerIndex: 0, + metricName: "s0-cassandra-test_keyspace", + }, + { + name: "metricName from keyspace", + metadataTestData: &testCassandraMetadata[2], + triggerIndex: 1, + metricName: "s1-cassandra-test_keyspace", + }, +} + +var successMetaData = map[string]string{ + "query": "SELECT COUNT(*) FROM test_keyspace.test_table;", + "targetQueryValue": "1", + "username": "cassandra", + "clusterIPAddress": "cassandra.test:9042", + "keyspace": "test_keyspace", } func TestCassandraParseMetadata(t *testing.T) { - testCaseNum := 1 for _, testData := range testCassandraMetadata { - _, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) - if err != nil && !testData.isError { - t.Errorf("Expected success but got error for unit test # %v", testCaseNum) - } - if 
testData.isError && err == nil { - t.Errorf("Expected error but got success for unit test # %v", testCaseNum) - } - testCaseNum++ + t.Run(testData.name, func(t *testing.T) { + _, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadata, + AuthParams: testData.authParams, + }) + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Error("Expected error but got success") + } + }) } } func TestCassandraGetMetricSpecForScaling(t *testing.T) { for _, testData := range cassandraMetricIdentifiers { - meta, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex, AuthParams: testData.metadataTestData.authParams}) - if err != nil { - t.Fatal("Could not parse metadata:", err) - } - cluster := gocql.NewCluster(meta.clusterIPAddress) - session, _ := cluster.CreateSession() - mockCassandraScaler := cassandraScaler{"", meta, session, logr.Discard()} - - metricSpec := mockCassandraScaler.GetMetricSpecForScaling(context.Background()) - metricName := metricSpec[0].External.Metric.Name - if metricName != testData.name { - t.Errorf("Wrong External metric source name: %s, expected: %s", metricName, testData.name) - } - } -} + t.Run(testData.name, func(t *testing.T) { + meta, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadataTestData.metadata, + TriggerIndex: testData.triggerIndex, + AuthParams: testData.metadataTestData.authParams, + }) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + mockCassandraScaler := cassandraScaler{ + metricType: v2.AverageValueMetricType, + metadata: meta, + session: &gocql.Session{}, + logger: logr.Discard(), + } -func assertCertContents(testData parseCassandraTLSTestData, meta *CassandraMetadata, prop string) error { - if testData.authParams[prop] != "" { - var path string - switch prop { - case "cert": - path = meta.cert - case "key": - path = meta.key - } - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("expected to find '%v' file at %v", prop, path) - } - contents := string(data) - if contents != testData.authParams[prop] { - return fmt.Errorf("expected value: '%v' but got '%v'", testData.authParams[prop], contents) - } + metricSpec := mockCassandraScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + assert.Equal(t, testData.metricName, metricName) + }) } - return nil } -var successMetaData = map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "TriggerIndex": "0"} - func TestParseCassandraTLS(t *testing.T) { for _, testData := range tlsAuthParamsTestData { - meta, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: successMetaData, AuthParams: testData.authParams}) - - if err != nil && !testData.isError { - t.Error("Expected success but got error", err) - } - if testData.isError && err == nil { - t.Error("Expected error but got success") - } - if meta.enableTLS != testData.enableTLS { - t.Errorf("Expected enableTLS to be set to %v but got %v\n", testData.enableTLS, meta.enableTLS) - } - if meta.enableTLS { - if meta.cert != testData.authParams["cert"] { - err := assertCertContents(testData, meta, "cert") - if err != nil { - t.Errorf(err.Error()) - } - } - if meta.key != 
testData.authParams["key"] { - err := assertCertContents(testData, meta, "key") - if err != nil { - t.Errorf(err.Error()) + t.Run(testData.name, func(t *testing.T) { + meta, err := parseCassandraMetadata(&scalersconfig.ScalerConfig{ + TriggerMetadata: successMetaData, + AuthParams: testData.authParams, + }) + + if testData.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, testData.tlsEnabled, meta.TLS == "enable") + + if meta.TLS == "enable" { + // Verify cert contents + if testData.authParams["cert"] != "" { + data, err := os.ReadFile(meta.Cert) + assert.NoError(t, err) + assert.Equal(t, testData.authParams["cert"], string(data)) + // Cleanup + defer os.Remove(meta.Cert) + } + + // Verify key contents + if testData.authParams["key"] != "" { + data, err := os.ReadFile(meta.Key) + assert.NoError(t, err) + assert.Equal(t, testData.authParams["key"], string(data)) + // Cleanup + defer os.Remove(meta.Key) + } + + // Verify CA contents if present + if testData.authParams["ca"] != "" { + data, err := os.ReadFile(meta.CA) + assert.NoError(t, err) + assert.Equal(t, testData.authParams["ca"], string(data)) + // Cleanup + defer os.Remove(meta.CA) + } } } - } + }) } } diff --git a/pkg/scalers/couchdb_scaler.go b/pkg/scalers/couchdb_scaler.go index 62ab5890493..b84332b7127 100644 --- a/pkg/scalers/couchdb_scaler.go +++ b/pkg/scalers/couchdb_scaler.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "net" - "strconv" couchdb "github.com/go-kivik/couchdb/v3" "github.com/go-kivik/kivik/v3" @@ -19,216 +18,168 @@ import ( type couchDBScaler struct { metricType v2.MetricTargetType - metadata *couchDBMetadata + metadata couchDBMetadata client *kivik.Client logger logr.Logger } +type couchDBMetadata struct { + ConnectionString string `keda:"name=connectionString,order=authParams;triggerMetadata;resolvedEnv,optional"` + Host string `keda:"name=host,order=authParams;triggerMetadata,optional"` + Port string `keda:"name=port,order=authParams;triggerMetadata,optional"` + Username string `keda:"name=username,order=authParams;triggerMetadata,optional"` + Password string `keda:"name=password,order=authParams;triggerMetadata;resolvedEnv,optional"` + DBName string `keda:"name=dbName,order=authParams;triggerMetadata,optional"` + Query string `keda:"name=query,order=triggerMetadata,optional"` + QueryValue int64 `keda:"name=queryValue,order=triggerMetadata,optional"` + ActivationQueryValue int64 `keda:"name=activationQueryValue,order=triggerMetadata,default=0,optional"` + TriggerIndex int +} + +func (m *couchDBMetadata) Validate() error { + if m.ConnectionString == "" { + if m.Host == "" { + return fmt.Errorf("no host given") + } + if m.Port == "" { + return fmt.Errorf("no port given") + } + if m.Username == "" { + return fmt.Errorf("no username given") + } + if m.Password == "" { + return fmt.Errorf("no password given") + } + if m.DBName == "" { + return fmt.Errorf("no dbName given") + } + } + return nil +} + type couchDBQueryRequest struct { Selector map[string]interface{} `json:"selector"` Fields []string `json:"fields"` } -type couchDBMetadata struct { - connectionString string - host string - port string - username string - password string - dbName string - query string - queryValue int64 - activationQueryValue int64 - triggerIndex int -} - type Res struct { ID string `json:"_id"` Feet int `json:"feet"` Greeting string `json:"greeting"` } -func (s *couchDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { - externalMetric := &v2.ExternalMetricSource{ - Metric: 
v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", s.metadata.dbName))), - }, - Target: GetMetricTarget(s.metricType, s.metadata.queryValue), +func NewCouchDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - metricSpec := v2.MetricSpec{ - External: externalMetric, Type: externalMetricType, + + meta, err := parseCouchDBMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing couchdb metadata: %w", err) } - return []v2.MetricSpec{metricSpec} -} -func (s couchDBScaler) Close(ctx context.Context) error { - if s.client != nil { - err := s.client.Close(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to close couchdb connection, because of %v", err)) - return err - } + connStr := meta.ConnectionString + if connStr == "" { + addr := net.JoinHostPort(meta.Host, meta.Port) + connStr = "http://" + addr } - return nil -} -func (s *couchDBScaler) getQueryResult(ctx context.Context) (int64, error) { - db := s.client.DB(ctx, s.metadata.dbName) - var request couchDBQueryRequest - err := json.Unmarshal([]byte(s.metadata.query), &request) + client, err := kivik.New("couch", connStr) if err != nil { - s.logger.Error(err, fmt.Sprintf("Couldn't unmarshal query string because of %v", err)) - return 0, err + return nil, fmt.Errorf("error creating couchdb client: %w", err) } - rows, err := db.Find(ctx, request, nil) + + err = client.Authenticate(ctx, couchdb.BasicAuth("admin", meta.Password)) if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to fetch rows because of %v", err)) - return 0, err + return nil, fmt.Errorf("error authenticating with couchdb: %w", err) } - var count int64 - for rows.Next() { - count++ - res := Res{} - if err := rows.ScanDoc(&res); err != nil { - s.logger.Error(err, fmt.Sprintf("failed to scan the doc because of %v", err)) - return 0, err - } + + isConnected, err := client.Ping(ctx) + if !isConnected || err != nil { + return nil, fmt.Errorf("failed to ping couchdb: %w", err) } - return count, nil + + return &couchDBScaler{ + metricType: metricType, + metadata: meta, + client: client, + logger: InitializeLogger(config, "couchdb_scaler"), + }, nil } -func parseCouchDBMetadata(config *scalersconfig.ScalerConfig) (*couchDBMetadata, string, error) { - var connStr string - var err error +func parseCouchDBMetadata(config *scalersconfig.ScalerConfig) (couchDBMetadata, error) { meta := couchDBMetadata{} - - if val, ok := config.TriggerMetadata["query"]; ok { - meta.query = val - } else { - return nil, "", fmt.Errorf("no query given") + err := config.TypedConfig(&meta) + if err != nil { + return meta, fmt.Errorf("error parsing couchdb metadata: %w", err) } - if val, ok := config.TriggerMetadata["queryValue"]; ok { - queryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, "", fmt.Errorf("failed to convert %v to int, because of %w", val, err) - } - meta.queryValue = queryValue - } else { - if config.AsMetricSource { - meta.queryValue = 0 - } else { - return nil, "", fmt.Errorf("no queryValue given") - } + if meta.QueryValue == 0 && !config.AsMetricSource { + return meta, fmt.Errorf("no queryValue given") } - meta.activationQueryValue = 0 - if val, ok := config.TriggerMetadata["activationQueryValue"]; ok { - activationQueryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { 
- return nil, "", fmt.Errorf("failed to convert %v to int, because of %w", val, err) - } - meta.activationQueryValue = activationQueryValue + if config.AsMetricSource { + meta.QueryValue = 0 } - dbName, err := GetFromAuthOrMeta(config, "dbName") - if err != nil { - return nil, "", err - } - meta.dbName = dbName - - switch { - case config.AuthParams["connectionString"] != "": - meta.connectionString = config.AuthParams["connectionString"] - case config.TriggerMetadata["connectionStringFromEnv"] != "": - meta.connectionString = config.ResolvedEnv[config.TriggerMetadata["connectionStringFromEnv"]] - default: - meta.connectionString = "" - host, err := GetFromAuthOrMeta(config, "host") - if err != nil { - return nil, "", err - } - meta.host = host - - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, "", err - } - meta.port = port - - username, err := GetFromAuthOrMeta(config, "username") - if err != nil { - return nil, "", err - } - meta.username = username + meta.TriggerIndex = config.TriggerIndex + return meta, nil +} - if config.AuthParams["password"] != "" { - meta.password = config.AuthParams["password"] - } else if config.TriggerMetadata["passwordFromEnv"] != "" { - meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - } - if len(meta.password) == 0 { - return nil, "", fmt.Errorf("no password given") +func (s *couchDBScaler) Close(ctx context.Context) error { + if s.client != nil { + if err := s.client.Close(ctx); err != nil { + s.logger.Error(err, "failed to close couchdb connection") + return err } } - - if meta.connectionString != "" { - connStr = meta.connectionString - } else { - // Build connection str - addr := net.JoinHostPort(meta.host, meta.port) - // nosemgrep: db-connection-string - connStr = "http://" + addr - } - meta.triggerIndex = config.TriggerIndex - return &meta, connStr, nil + return nil } -func NewCouchDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { - metricType, err := GetMetricTargetType(config) - if err != nil { - return nil, fmt.Errorf("error getting scaler metric type: %w", err) +func (s *couchDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + metricName := kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", s.metadata.DBName)) + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, metricName), + }, + Target: GetMetricTarget(s.metricType, s.metadata.QueryValue), } + metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2.MetricSpec{metricSpec} +} - meta, connStr, err := parseCouchDBMetadata(config) - if err != nil { - return nil, fmt.Errorf("failed to parsing couchDB metadata, because of %w", err) - } +func (s *couchDBScaler) getQueryResult(ctx context.Context) (int64, error) { + db := s.client.DB(ctx, s.metadata.DBName) - client, err := kivik.New("couch", connStr) - if err != nil { - return nil, fmt.Errorf("%w", err) + var request couchDBQueryRequest + if err := json.Unmarshal([]byte(s.metadata.Query), &request); err != nil { + return 0, fmt.Errorf("error unmarshaling query: %w", err) } - err = client.Authenticate(ctx, couchdb.BasicAuth("admin", meta.password)) + rows, err := db.Find(ctx, request, nil) if err != nil { - return nil, err + return 0, fmt.Errorf("error executing query: %w", err) } - isconnected, err := client.Ping(ctx) - if !isconnected { - return nil, fmt.Errorf("%w", err) - } - if err != nil { - return nil, 
fmt.Errorf("failed to ping couchDB, because of %w", err) + var count int64 + for rows.Next() { + count++ + var res Res + if err := rows.ScanDoc(&res); err != nil { + return 0, fmt.Errorf("error scanning document: %w", err) + } } - return &couchDBScaler{ - metricType: metricType, - metadata: meta, - client: client, - logger: InitializeLogger(config, "couchdb_scaler"), - }, nil + return count, nil } -// GetMetricsAndActivity query from couchDB,and return to external metrics and activity func (s *couchDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { result, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect couchdb, because of %w", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect couchdb: %w", err) } metric := GenerateMetricInMili(metricName, float64(result)) - - return append([]external_metrics.ExternalMetricValue{}, metric), result > s.metadata.activationQueryValue, nil + return []external_metrics.ExternalMetricValue{metric}, result > s.metadata.ActivationQueryValue, nil } diff --git a/pkg/scalers/couchdb_scaler_test.go b/pkg/scalers/couchdb_scaler_test.go index af7ae36b9b1..54b4a4b5b5a 100644 --- a/pkg/scalers/couchdb_scaler_test.go +++ b/pkg/scalers/couchdb_scaler_test.go @@ -7,6 +7,7 @@ import ( _ "github.com/go-kivik/couchdb/v3" "github.com/go-kivik/kivik/v3" "github.com/go-logr/logr" + v2 "k8s.io/api/autoscaling/v2" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) @@ -17,6 +18,7 @@ var testCouchDBResolvedEnv = map[string]string{ } type parseCouchDBMetadataTestData struct { + name string metadata map[string]string authParams map[string]string resolvedEnv map[string]string @@ -32,6 +34,7 @@ type couchDBMetricIdentifier struct { var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{ // No metadata { + name: "no metadata", metadata: map[string]string{}, authParams: map[string]string{}, resolvedEnv: testCouchDBResolvedEnv, @@ -39,6 +42,7 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{ }, // connectionStringFromEnv { + name: "with connectionStringFromEnv", metadata: map[string]string{"query": `{ "selector": { "feet": { "$gt": 0 } }, "fields": ["_id", "feet", "greeting"] }`, "queryValue": "1", "connectionStringFromEnv": "CouchDB_CONN_STR", "dbName": "animals"}, authParams: map[string]string{}, resolvedEnv: testCouchDBResolvedEnv, @@ -46,6 +50,7 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{ }, // with metric name { + name: "with metric name", metadata: map[string]string{"query": `{ "selector": { "feet": { "$gt": 0 } }, "fields": ["_id", "feet", "greeting"] }`, "queryValue": "1", "connectionStringFromEnv": "CouchDB_CONN_STR", "dbName": "animals"}, authParams: map[string]string{}, resolvedEnv: testCouchDBResolvedEnv, @@ -53,6 +58,7 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{ }, // from trigger auth { + name: "from trigger auth", metadata: map[string]string{"query": `{ "selector": { "feet": { "$gt": 0 } }, "fields": ["_id", "feet", "greeting"] }`, "queryValue": "1"}, authParams: map[string]string{"dbName": "animals", "host": "localhost", "port": "5984", "username": "admin", "password": "YeFvQno9LylIm5MDgwcV"}, resolvedEnv: testCouchDBResolvedEnv, @@ -60,7 +66,8 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{ }, // wrong activationQueryValue { - metadata: map[string]string{"query": `{ "selector": { "feet": { "$gt": 0 } }, 
"fields": ["_id", "feet", "greeting"] }`, "queryValue": "1", "activationQueryValue": "1", "connectionStringFromEnv": "CouchDB_CONN_STR", "dbName": "animals"}, + name: "wrong activationQueryValue", + metadata: map[string]string{"query": `{ "selector": { "feet": { "$gt": 0 } }, "fields": ["_id", "feet", "greeting"] }`, "queryValue": "1", "activationQueryValue": "a", "connectionStringFromEnv": "CouchDB_CONN_STR", "dbName": "animals"}, authParams: map[string]string{}, resolvedEnv: testCouchDBResolvedEnv, raisesError: true, @@ -74,25 +81,47 @@ var couchDBMetricIdentifiers = []couchDBMetricIdentifier{ func TestParseCouchDBMetadata(t *testing.T) { for _, testData := range testCOUCHDBMetadata { - _, _, err := parseCouchDBMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) - if err != nil && !testData.raisesError { - t.Error("Expected success but got error:", err) - } + t.Run(testData.name, func(t *testing.T) { + _, err := parseCouchDBMetadata(&scalersconfig.ScalerConfig{ + TriggerMetadata: testData.metadata, + AuthParams: testData.authParams, + ResolvedEnv: testData.resolvedEnv, + }) + if err != nil && !testData.raisesError { + t.Errorf("Test case '%s': Expected success but got error: %v", testData.name, err) + } + if testData.raisesError && err == nil { + t.Errorf("Test case '%s': Expected error but got success", testData.name) + } + }) } } func TestCouchDBGetMetricSpecForScaling(t *testing.T) { for _, testData := range couchDBMetricIdentifiers { - meta, _, err := parseCouchDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex}) - if err != nil { - t.Fatal("Could not parse metadata:", err) - } - mockCouchDBScaler := couchDBScaler{"", meta, &kivik.Client{}, logr.Discard()} + t.Run(testData.name, func(t *testing.T) { + meta, err := parseCouchDBMetadata(&scalersconfig.ScalerConfig{ + ResolvedEnv: testData.metadataTestData.resolvedEnv, + AuthParams: testData.metadataTestData.authParams, + TriggerMetadata: testData.metadataTestData.metadata, + TriggerIndex: testData.triggerIndex, + }) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } - metricSpec := mockCouchDBScaler.GetMetricSpecForScaling(context.Background()) - metricName := metricSpec[0].External.Metric.Name - if metricName != testData.name { - t.Error("Wrong External metric source name:", metricName) - } + mockCouchDBScaler := couchDBScaler{ + metricType: v2.AverageValueMetricType, + metadata: meta, + client: &kivik.Client{}, + logger: logr.Discard(), + } + + metricSpec := mockCouchDBScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + if metricName != testData.name { + t.Errorf("Wrong External metric source name: %s, expected: %s", metricName, testData.name) + } + }) } } diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go index ce845414966..8da440ab77e 100644 --- a/pkg/scalers/cpu_memory_scaler.go +++ b/pkg/scalers/cpu_memory_scaler.go @@ -29,10 +29,6 @@ type cpuMemoryMetadata struct { MetricType v2.MetricTargetType } -func (m *cpuMemoryMetadata) Validate() error { - return nil -} - // NewCPUMemoryScaler creates a new cpuMemoryScaler func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig) (Scaler, error) { logger := InitializeLogger(config, "cpu_memory_scaler") @@ -42,13 +38,6 @@ func 
NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.Scal return nil, fmt.Errorf("error parsing %s metadata: %w", resourceName, err) } - if err := meta.Validate(); err != nil { - if meta.MetricType == "" { - return nil, fmt.Errorf("metricType is required") - } - return nil, fmt.Errorf("validation error: %w", err) - } - return &cpuMemoryScaler{ metadata: meta, resourceName: resourceName, diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index c9c93c501b1..026c363709e 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -43,6 +43,7 @@ type githubRunnerMetadata struct { personalAccessToken *string repos []string labels []string + noDefaultLabels bool targetWorkflowQueueLength int64 triggerIndex int applicationID *int64 @@ -372,8 +373,8 @@ func getValueFromMetaOrEnv(key string, metadata map[string]string, env map[strin } // getInt64ValueFromMetaOrEnv returns the value of the given key from the metadata or the environment variables -func getInt64ValueFromMetaOrEnv(key string, config *scalersconfig.ScalerConfig) (int64, error) { - sInt, err := getValueFromMetaOrEnv(key, config.TriggerMetadata, config.ResolvedEnv) +func getInt64ValueFromMetaOrEnv(key string, metadata map[string]string, env map[string]string) (int64, error) { + sInt, err := getValueFromMetaOrEnv(key, metadata, env) if err != nil { return -1, fmt.Errorf("error parsing %s: %w", key, err) } @@ -385,6 +386,20 @@ func getInt64ValueFromMetaOrEnv(key string, config *scalersconfig.ScalerConfig) return goodInt, nil } +// getBoolValueFromMetaOrEnv returns the boolean value of the given key from the metadata or the environment variables +func getBoolValueFromMetaOrEnv(key string, metadata map[string]string, env map[string]string) (bool, error) { + sBool, err := getValueFromMetaOrEnv(key, metadata, env) + if err != nil { + return false, fmt.Errorf("error parsing %s: %w", key, err) + } + + goodBool, err := strconv.ParseBool(sBool) + if err != nil { + return false, fmt.Errorf("error parsing %s: %w", key, err) + } + return goodBool, nil +} + func parseGitHubRunnerMetadata(config *scalersconfig.ScalerConfig) (*githubRunnerMetadata, error) { meta := &githubRunnerMetadata{} meta.targetWorkflowQueueLength = defaultTargetWorkflowQueueLength @@ -401,7 +416,7 @@ func parseGitHubRunnerMetadata(config *scalersconfig.ScalerConfig) (*githubRunne return nil, err } - if val, err := getInt64ValueFromMetaOrEnv("targetWorkflowQueueLength", config); err == nil && val != -1 { + if val, err := getInt64ValueFromMetaOrEnv("targetWorkflowQueueLength", config.TriggerMetadata, config.ResolvedEnv); err == nil && val != -1 { meta.targetWorkflowQueueLength = val } else { meta.targetWorkflowQueueLength = defaultTargetWorkflowQueueLength @@ -411,6 +426,12 @@ func parseGitHubRunnerMetadata(config *scalersconfig.ScalerConfig) (*githubRunne meta.labels = strings.Split(val, ",") } + if val, err := getBoolValueFromMetaOrEnv("noDefaultLabels", config.TriggerMetadata, config.ResolvedEnv); err == nil { + meta.noDefaultLabels = val + } else { + meta.noDefaultLabels = false + } + if val, err := getValueFromMetaOrEnv("repos", config.TriggerMetadata, config.ResolvedEnv); err == nil && val != "" { meta.repos = strings.Split(val, ",") } @@ -448,12 +469,12 @@ func setupGitHubApp(config *scalersconfig.ScalerConfig) (*int64, *int64, *string var instID *int64 var appKey *string - appIDVal, appIDErr := getInt64ValueFromMetaOrEnv("applicationID", config) + appIDVal, appIDErr :=
getInt64ValueFromMetaOrEnv("applicationID", config.TriggerMetadata, config.ResolvedEnv) if appIDErr == nil && appIDVal != -1 { appID = &appIDVal } - instIDVal, instIDErr := getInt64ValueFromMetaOrEnv("installationID", config) + instIDVal, instIDErr := getInt64ValueFromMetaOrEnv("installationID", config.TriggerMetadata, config.ResolvedEnv) if instIDErr == nil && instIDVal != -1 { instID = &instIDVal } @@ -625,9 +646,13 @@ func contains(s []string, e string) bool { } // canRunnerMatchLabels check Agent Label array will match runner label array -func canRunnerMatchLabels(jobLabels []string, runnerLabels []string) bool { +func canRunnerMatchLabels(jobLabels []string, runnerLabels []string, noDefaultLabels bool) bool { + allLabels := runnerLabels + if !noDefaultLabels { + allLabels = append(allLabels, reservedLabels...) + } for _, jobLabel := range jobLabels { - if !contains(runnerLabels, jobLabel) && !contains(reservedLabels, jobLabel) { + if !contains(allLabels, jobLabel) { return false } } @@ -665,7 +690,7 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, return -1, err } for _, job := range jobs { - if (job.Status == "queued" || job.Status == "in_progress") && canRunnerMatchLabels(job.Labels, s.metadata.labels) { + if (job.Status == "queued" || job.Status == "in_progress") && canRunnerMatchLabels(job.Labels, s.metadata.labels, s.metadata.noDefaultLabels) { queueCount++ } } diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 808ac78562c..0f88a330c87 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -22,6 +22,7 @@ var testGhWorkflowResponse = `{"total_count":1,"workflow_runs":[{"id":30433642," var ghDeadJob = `{"id":30433642,"name":"Build","node_id":"MDEyOldvcmtmbG93IFJ1bjI2OTI4OQ==","check_suite_id":42,"check_suite_node_id":"MDEwOkNoZWNrU3VpdGU0Mg==","head_branch":"master","head_sha":"acb5820ced9479c074f688cc328bf03f341a511d","run_number":562,"event":"push","status":"completed","conclusion":null,"workflow_id":159038,"url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642","html_url":"https://github.com/octo-org/octo-repo/actions/runs/30433642","pull_requests":[],"created_at":"2020-01-22T19:33:08Z","updated_at":"2020-01-22T19:33:08Z","actor":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/octocat/followers","following_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"run_attempt":1,"run_started_at":"2020-01-22T19:33:08Z","triggering_actor":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/o
ctocat/followers","following_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"jobs_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642/jobs","logs_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642/logs","check_suite_url":"https://api.github.com/repos/octo-org/octo-repo/check-suites/414944374","artifacts_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642/artifacts","cancel_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642/cancel","rerun_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/30433642/rerun","workflow_url":"https://api.github.com/repos/octo-org/octo-repo/actions/workflows/159038","head_commit":{"id":"acb5820ced9479c074f688cc328bf03f341a511d","tree_id":"d23f6eedb1e1b9610bbc754ddb5197bfe7271223","message":"Create linter.yaml","timestamp":"2020-01-22T19:33:05Z","author":{"name":"Octo Cat","email":"octocat@github.com"},"committer":{"name":"GitHub","email":"noreply@github.com"}},"repository":{"id":1296269,"node_id":"MDEwOlJlcG9zaXRvcnkxMjk2MjY5","name":"Hello-World","full_name":"octocat/Hello-World","owner":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/octocat/followers","following_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"private":false,"html_url":"https://github.com/octocat/Hello-World","description":"This your first 
repo!","fork":false,"url":"https://api.github.com/repos/octocat/Hello-World","archive_url":"https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}","assignees_url":"https://api.github.com/repos/octocat/Hello-World/assignees{/user}","blobs_url":"https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}","branches_url":"https://api.github.com/repos/octocat/Hello-World/branches{/branch}","collaborators_url":"https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}","comments_url":"https://api.github.com/repos/octocat/Hello-World/comments{/number}","commits_url":"https://api.github.com/repos/octocat/Hello-World/commits{/sha}","compare_url":"https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}","contents_url":"https://api.github.com/repos/octocat/Hello-World/contents/{+path}","contributors_url":"https://api.github.com/repos/octocat/Hello-World/contributors","deployments_url":"https://api.github.com/repos/octocat/Hello-World/deployments","downloads_url":"https://api.github.com/repos/octocat/Hello-World/downloads","events_url":"https://api.github.com/repos/octocat/Hello-World/events","forks_url":"https://api.github.com/repos/octocat/Hello-World/forks","git_commits_url":"https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}","git_refs_url":"https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}","git_tags_url":"https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}","git_url":"git:github.com/octocat/Hello-World.git","issue_comment_url":"https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}","issue_events_url":"https://api.github.com/repos/octocat/Hello-World/issues/events{/number}","issues_url":"https://api.github.com/repos/octocat/Hello-World/issues{/number}","keys_url":"https://api.github.com/repos/octocat/Hello-World/keys{/key_id}","labels_url":"https://api.github.com/repos/octocat/Hello-World/labels{/name}","languages_url":"https://api.github.com/repos/octocat/Hello-World/languages","merges_url":"https://api.github.com/repos/octocat/Hello-World/merges","milestones_url":"https://api.github.com/repos/octocat/Hello-World/milestones{/number}","notifications_url":"https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}","pulls_url":"https://api.github.com/repos/octocat/Hello-World/pulls{/number}","releases_url":"https://api.github.com/repos/octocat/Hello-World/releases{/id}","ssh_url":"git@github.com:octocat/Hello-World.git","stargazers_url":"https://api.github.com/repos/octocat/Hello-World/stargazers","statuses_url":"https://api.github.com/repos/octocat/Hello-World/statuses/{sha}","subscribers_url":"https://api.github.com/repos/octocat/Hello-World/subscribers","subscription_url":"https://api.github.com/repos/octocat/Hello-World/subscription","tags_url":"https://api.github.com/repos/octocat/Hello-World/tags","teams_url":"https://api.github.com/repos/octocat/Hello-World/teams","trees_url":"https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}","hooks_url":"http://api.github.com/repos/octocat/Hello-World/hooks"},"head_repository":{"id":217723378,"node_id":"MDEwOlJlcG9zaXRvcnkyMTc3MjMzNzg=","name":"octo-repo","full_name":"octo-org/octo-repo","private":true,"owner":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/octocat/followers","fo
llowing_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"html_url":"https://github.com/octo-org/octo-repo","description":null,"fork":false,"url":"https://api.github.com/repos/octo-org/octo-repo","forks_url":"https://api.github.com/repos/octo-org/octo-repo/forks","keys_url":"https://api.github.com/repos/octo-org/octo-repo/keys{/key_id}","collaborators_url":"https://api.github.com/repos/octo-org/octo-repo/collaborators{/collaborator}","teams_url":"https://api.github.com/repos/octo-org/octo-repo/teams","hooks_url":"https://api.github.com/repos/octo-org/octo-repo/hooks","issue_events_url":"https://api.github.com/repos/octo-org/octo-repo/issues/events{/number}","events_url":"https://api.github.com/repos/octo-org/octo-repo/events","assignees_url":"https://api.github.com/repos/octo-org/octo-repo/assignees{/user}","branches_url":"https://api.github.com/repos/octo-org/octo-repo/branches{/branch}","tags_url":"https://api.github.com/repos/octo-org/octo-repo/tags","blobs_url":"https://api.github.com/repos/octo-org/octo-repo/git/blobs{/sha}","git_tags_url":"https://api.github.com/repos/octo-org/octo-repo/git/tags{/sha}","git_refs_url":"https://api.github.com/repos/octo-org/octo-repo/git/refs{/sha}","trees_url":"https://api.github.com/repos/octo-org/octo-repo/git/trees{/sha}","statuses_url":"https://api.github.com/repos/octo-org/octo-repo/statuses/{sha}","languages_url":"https://api.github.com/repos/octo-org/octo-repo/languages","stargazers_url":"https://api.github.com/repos/octo-org/octo-repo/stargazers","contributors_url":"https://api.github.com/repos/octo-org/octo-repo/contributors","subscribers_url":"https://api.github.com/repos/octo-org/octo-repo/subscribers","subscription_url":"https://api.github.com/repos/octo-org/octo-repo/subscription","commits_url":"https://api.github.com/repos/octo-org/octo-repo/commits{/sha}","git_commits_url":"https://api.github.com/repos/octo-org/octo-repo/git/commits{/sha}","comments_url":"https://api.github.com/repos/octo-org/octo-repo/comments{/number}","issue_comment_url":"https://api.github.com/repos/octo-org/octo-repo/issues/comments{/number}","contents_url":"https://api.github.com/repos/octo-org/octo-repo/contents/{+path}","compare_url":"https://api.github.com/repos/octo-org/octo-repo/compare/{base}...{head}","merges_url":"https://api.github.com/repos/octo-org/octo-repo/merges","archive_url":"https://api.github.com/repos/octo-org/octo-repo/{archive_format}{/ref}","downloads_url":"https://api.github.com/repos/octo-org/octo-repo/downloads","issues_url":"https://api.github.com/repos/octo-org/octo-repo/issues{/number}","pulls_url":"https://api.github.com/repos/octo-org/octo-repo/pulls{/number}","milestones_url":"https://api.github.com/repos/octo-org/octo-repo/milestones{/number}","notifications_url":"https://api.github.com/repos/octo-org/octo-repo/notifications{?since,all,participating}","labels_url":"https://api.github.com/repos/octo-org/octo-repo/labels{/name}","releases_url":"https://api.github.com/repos/octo-org/octo-repo/releases{/id}","dep
loyments_url":"https://api.github.com/repos/octo-org/octo-repo/deployments"}}` var testGhUserReposResponse = `[{"id":1296269,"node_id":"MDEwOlJlcG9zaXRvcnkxMjk2MjY5","name":"Hello-World-2","full_name":"octocat/Hello-World","owner":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/octocat/followers","following_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"private":false,"html_url":"https://github.com/octocat/Hello-World","description":"This your first repo!","fork":false,"url":"https://api.github.com/repos/octocat/Hello-World","archive_url":"https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}","assignees_url":"https://api.github.com/repos/octocat/Hello-World/assignees{/user}","blobs_url":"https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}","branches_url":"https://api.github.com/repos/octocat/Hello-World/branches{/branch}","collaborators_url":"https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}","comments_url":"https://api.github.com/repos/octocat/Hello-World/comments{/number}","commits_url":"https://api.github.com/repos/octocat/Hello-World/commits{/sha}","compare_url":"https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}","contents_url":"https://api.github.com/repos/octocat/Hello-World/contents/{+path}","contributors_url":"https://api.github.com/repos/octocat/Hello-World/contributors","deployments_url":"https://api.github.com/repos/octocat/Hello-World/deployments","downloads_url":"https://api.github.com/repos/octocat/Hello-World/downloads","events_url":"https://api.github.com/repos/octocat/Hello-World/events","forks_url":"https://api.github.com/repos/octocat/Hello-World/forks","git_commits_url":"https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}","git_refs_url":"https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}","git_tags_url":"https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}","git_url":"git:github.com/octocat/Hello-World.git","issue_comment_url":"https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}","issue_events_url":"https://api.github.com/repos/octocat/Hello-World/issues/events{/number}","issues_url":"https://api.github.com/repos/octocat/Hello-World/issues{/number}","keys_url":"https://api.github.com/repos/octocat/Hello-World/keys{/key_id}","labels_url":"https://api.github.com/repos/octocat/Hello-World/labels{/name}","languages_url":"https://api.github.com/repos/octocat/Hello-World/languages","merges_url":"https://api.github.com/repos/octocat/Hello-World/merges","milestones_url":"https://api.github.com/repos/octocat/Hello-World/milestones{/number}","notifications_url":"https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}","pulls_url":"https://api.github.com/repos/octocat
/Hello-World/pulls{/number}","releases_url":"https://api.github.com/repos/octocat/Hello-World/releases{/id}","ssh_url":"git@github.com:octocat/Hello-World.git","stargazers_url":"https://api.github.com/repos/octocat/Hello-World/stargazers","statuses_url":"https://api.github.com/repos/octocat/Hello-World/statuses/{sha}","subscribers_url":"https://api.github.com/repos/octocat/Hello-World/subscribers","subscription_url":"https://api.github.com/repos/octocat/Hello-World/subscription","tags_url":"https://api.github.com/repos/octocat/Hello-World/tags","teams_url":"https://api.github.com/repos/octocat/Hello-World/teams","trees_url":"https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}","clone_url":"https://github.com/octocat/Hello-World.git","mirror_url":"git:git.example.com/octocat/Hello-World","hooks_url":"https://api.github.com/repos/octocat/Hello-World/hooks","svn_url":"https://svn.github.com/octocat/Hello-World","homepage":"https://github.com","language":null,"forks_count":9,"stargazers_count":80,"watchers_count":80,"size":108,"default_branch":"master","open_issues_count":0,"is_template":true,"topics":["octocat","atom","electron","api"],"has_issues":true,"has_projects":true,"has_wiki":true,"has_pages":false,"has_downloads":true,"archived":false,"disabled":false,"visibility":"public","pushed_at":"2011-01-26T19:06:43Z","created_at":"2011-01-26T19:01:12Z","updated_at":"2011-01-26T19:14:43Z","permissions":{"admin":false,"push":false,"pull":true},"allow_rebase_merge":true,"template_repository":null,"temp_clone_token":"ABTLWHOULUVAXGTRYU7OC2876QJ2O","allow_squash_merge":true,"allow_auto_merge":false,"delete_branch_on_merge":true,"allow_merge_commit":true,"subscribers_count":42,"network_count":0,"license":{"key":"mit","name":"MIT License","url":"https://api.github.com/licenses/mit","spdx_id":"MIT","node_id":"MDc6TGljZW5zZW1pdA==","html_url":"https://github.com/licenses/mit"},"forks":1,"open_issues":1,"watchers":1},{"id":1296269,"node_id":"MDEwOlJlcG9zaXRvcnkxMjk2MjY5","name":"Hello-World","full_name":"octocat/Hello-World","owner":{"login":"octocat","id":1,"node_id":"MDQ6VXNlcjE=","avatar_url":"https://github.com/images/error/octocat_happy.gif","gravatar_id":"","url":"https://api.github.com/users/octocat","html_url":"https://github.com/octocat","followers_url":"https://api.github.com/users/octocat/followers","following_url":"https://api.github.com/users/octocat/following{/other_user}","gists_url":"https://api.github.com/users/octocat/gists{/gist_id}","starred_url":"https://api.github.com/users/octocat/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/octocat/subscriptions","organizations_url":"https://api.github.com/users/octocat/orgs","repos_url":"https://api.github.com/users/octocat/repos","events_url":"https://api.github.com/users/octocat/events{/privacy}","received_events_url":"https://api.github.com/users/octocat/received_events","type":"User","site_admin":false},"private":false,"html_url":"https://github.com/octocat/Hello-World","description":"This your first 
repo!","fork":false,"url":"https://api.github.com/repos/octocat/Hello-World","archive_url":"https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}","assignees_url":"https://api.github.com/repos/octocat/Hello-World/assignees{/user}","blobs_url":"https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}","branches_url":"https://api.github.com/repos/octocat/Hello-World/branches{/branch}","collaborators_url":"https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}","comments_url":"https://api.github.com/repos/octocat/Hello-World/comments{/number}","commits_url":"https://api.github.com/repos/octocat/Hello-World/commits{/sha}","compare_url":"https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}","contents_url":"https://api.github.com/repos/octocat/Hello-World/contents/{+path}","contributors_url":"https://api.github.com/repos/octocat/Hello-World/contributors","deployments_url":"https://api.github.com/repos/octocat/Hello-World/deployments","downloads_url":"https://api.github.com/repos/octocat/Hello-World/downloads","events_url":"https://api.github.com/repos/octocat/Hello-World/events","forks_url":"https://api.github.com/repos/octocat/Hello-World/forks","git_commits_url":"https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}","git_refs_url":"https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}","git_tags_url":"https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}","git_url":"git:github.com/octocat/Hello-World.git","issue_comment_url":"https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}","issue_events_url":"https://api.github.com/repos/octocat/Hello-World/issues/events{/number}","issues_url":"https://api.github.com/repos/octocat/Hello-World/issues{/number}","keys_url":"https://api.github.com/repos/octocat/Hello-World/keys{/key_id}","labels_url":"https://api.github.com/repos/octocat/Hello-World/labels{/name}","languages_url":"https://api.github.com/repos/octocat/Hello-World/languages","merges_url":"https://api.github.com/repos/octocat/Hello-World/merges","milestones_url":"https://api.github.com/repos/octocat/Hello-World/milestones{/number}","notifications_url":"https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}","pulls_url":"https://api.github.com/repos/octocat/Hello-World/pulls{/number}","releases_url":"https://api.github.com/repos/octocat/Hello-World/releases{/id}","ssh_url":"git@github.com:octocat/Hello-World.git","stargazers_url":"https://api.github.com/repos/octocat/Hello-World/stargazers","statuses_url":"https://api.github.com/repos/octocat/Hello-World/statuses/{sha}","subscribers_url":"https://api.github.com/repos/octocat/Hello-World/subscribers","subscription_url":"https://api.github.com/repos/octocat/Hello-World/subscription","tags_url":"https://api.github.com/repos/octocat/Hello-World/tags","teams_url":"https://api.github.com/repos/octocat/Hello-World/teams","trees_url":"https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}","clone_url":"https://github.com/octocat/Hello-World.git","mirror_url":"git:git.example.com/octocat/Hello-World","hooks_url":"https://api.github.com/repos/octocat/Hello-World/hooks","svn_url":"https://svn.github.com/octocat/Hello-World","homepage":"https://github.com","language":null,"forks_count":9,"stargazers_count":80,"watchers_count":80,"size":108,"default_branch":"master","open_issues_count":0,"is_template":true,"topics":["octocat","atom","electron","api"],"has_issues":true,"has_projects":true,"has_w
iki":true,"has_pages":false,"has_downloads":true,"archived":false,"disabled":false,"visibility":"public","pushed_at":"2011-01-26T19:06:43Z","created_at":"2011-01-26T19:01:12Z","updated_at":"2011-01-26T19:14:43Z","permissions":{"admin":false,"push":false,"pull":true},"allow_rebase_merge":true,"template_repository":null,"temp_clone_token":"ABTLWHOULUVAXGTRYU7OC2876QJ2O","allow_squash_merge":true,"allow_auto_merge":false,"delete_branch_on_merge":true,"allow_merge_commit":true,"subscribers_count":42,"network_count":0,"license":{"key":"mit","name":"MIT License","url":"https://api.github.com/licenses/mit","spdx_id":"MIT","node_id":"MDc6TGljZW5zZW1pdA==","html_url":"https://github.com/licenses/mit"},"forks":1,"open_issues":1,"watchers":1}]` var testGhWFJobResponse = `{"total_count":1,"jobs":[{"id":399444496,"run_id":29679449,"run_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/29679449","node_id":"MDEyOldvcmtmbG93IEpvYjM5OTQ0NDQ5Ng==","head_sha":"f83a356604ae3c5d03e1b46ef4d1ca77d64a90b0","url":"https://api.github.com/repos/octo-org/octo-repo/actions/jobs/399444496","html_url":"https://github.com/octo-org/octo-repo/runs/399444496","status":"queued","conclusion":"success","started_at":"2020-01-20T17:42:40Z","completed_at":"2020-01-20T17:44:39Z","name":"build","steps":[{"name":"Set up job","status":"completed","conclusion":"success","number":1,"started_at":"2020-01-20T09:42:40.000-08:00","completed_at":"2020-01-20T09:42:41.000-08:00"},{"name":"Run actions/checkout@v2","status":"queued","conclusion":"success","number":2,"started_at":"2020-01-20T09:42:41.000-08:00","completed_at":"2020-01-20T09:42:45.000-08:00"},{"name":"Set up Ruby","status":"completed","conclusion":"success","number":3,"started_at":"2020-01-20T09:42:45.000-08:00","completed_at":"2020-01-20T09:42:45.000-08:00"},{"name":"Run actions/cache@v3","status":"completed","conclusion":"success","number":4,"started_at":"2020-01-20T09:42:45.000-08:00","completed_at":"2020-01-20T09:42:48.000-08:00"},{"name":"Install Bundler","status":"completed","conclusion":"success","number":5,"started_at":"2020-01-20T09:42:48.000-08:00","completed_at":"2020-01-20T09:42:52.000-08:00"},{"name":"Install Gems","status":"completed","conclusion":"success","number":6,"started_at":"2020-01-20T09:42:52.000-08:00","completed_at":"2020-01-20T09:42:53.000-08:00"},{"name":"Run Tests","status":"completed","conclusion":"success","number":7,"started_at":"2020-01-20T09:42:53.000-08:00","completed_at":"2020-01-20T09:42:59.000-08:00"},{"name":"Deploy to Heroku","status":"completed","conclusion":"success","number":8,"started_at":"2020-01-20T09:42:59.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"},{"name":"Post actions/cache@v3","status":"completed","conclusion":"success","number":16,"started_at":"2020-01-20T09:44:39.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"},{"name":"Complete job","status":"completed","conclusion":"success","number":17,"started_at":"2020-01-20T09:44:39.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"}],"check_run_url":"https://api.github.com/repos/octo-org/octo-repo/check-runs/399444496","labels":["self-hosted","foo","bar"],"runner_id":1,"runner_name":"my runner","runner_group_id":2,"runner_group_name":"my runner group","workflow_name":"CI","head_branch":"main"}]}` +var testGhWFJobResponseOnlyCustomLabels = 
`{"total_count":1,"jobs":[{"id":399444496,"run_id":29679449,"run_url":"https://api.github.com/repos/octo-org/octo-repo/actions/runs/29679449","node_id":"MDEyOldvcmtmbG93IEpvYjM5OTQ0NDQ5Ng==","head_sha":"f83a356604ae3c5d03e1b46ef4d1ca77d64a90b0","url":"https://api.github.com/repos/octo-org/octo-repo/actions/jobs/399444496","html_url":"https://github.com/octo-org/octo-repo/runs/399444496","status":"queued","conclusion":"success","started_at":"2020-01-20T17:42:40Z","completed_at":"2020-01-20T17:44:39Z","name":"build","steps":[{"name":"Set up job","status":"completed","conclusion":"success","number":1,"started_at":"2020-01-20T09:42:40.000-08:00","completed_at":"2020-01-20T09:42:41.000-08:00"},{"name":"Run actions/checkout@v2","status":"queued","conclusion":"success","number":2,"started_at":"2020-01-20T09:42:41.000-08:00","completed_at":"2020-01-20T09:42:45.000-08:00"},{"name":"Set up Ruby","status":"completed","conclusion":"success","number":3,"started_at":"2020-01-20T09:42:45.000-08:00","completed_at":"2020-01-20T09:42:45.000-08:00"},{"name":"Run actions/cache@v3","status":"completed","conclusion":"success","number":4,"started_at":"2020-01-20T09:42:45.000-08:00","completed_at":"2020-01-20T09:42:48.000-08:00"},{"name":"Install Bundler","status":"completed","conclusion":"success","number":5,"started_at":"2020-01-20T09:42:48.000-08:00","completed_at":"2020-01-20T09:42:52.000-08:00"},{"name":"Install Gems","status":"completed","conclusion":"success","number":6,"started_at":"2020-01-20T09:42:52.000-08:00","completed_at":"2020-01-20T09:42:53.000-08:00"},{"name":"Run Tests","status":"completed","conclusion":"success","number":7,"started_at":"2020-01-20T09:42:53.000-08:00","completed_at":"2020-01-20T09:42:59.000-08:00"},{"name":"Deploy to Heroku","status":"completed","conclusion":"success","number":8,"started_at":"2020-01-20T09:42:59.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"},{"name":"Post actions/cache@v3","status":"completed","conclusion":"success","number":16,"started_at":"2020-01-20T09:44:39.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"},{"name":"Complete job","status":"completed","conclusion":"success","number":17,"started_at":"2020-01-20T09:44:39.000-08:00","completed_at":"2020-01-20T09:44:39.000-08:00"}],"check_run_url":"https://api.github.com/repos/octo-org/octo-repo/check-runs/399444496","labels":["foo","bar"],"runner_id":1,"runner_name":"my runner","runner_group_id":2,"runner_group_name":"my runner group","workflow_name":"CI","head_branch":"main"}]}` type parseGitHubRunnerMetadataTestData struct { testName string @@ -179,6 +180,10 @@ func generateResponseExceed30Repos() []byte { } func apiStubHandler(hasRateLeft bool, exceeds30Repos bool) *httptest.Server { + return apiStubHandlerCustomJob(hasRateLeft, exceeds30Repos, testGhWFJobResponse) +} + +func apiStubHandlerCustomJob(hasRateLeft bool, exceeds30Repos bool, jobResponse string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { futureReset := time.Now() futureReset = futureReset.Add(time.Minute * 30) @@ -190,7 +195,7 @@ func apiStubHandler(hasRateLeft bool, exceeds30Repos bool) *httptest.Server { w.WriteHeader(http.StatusForbidden) } if strings.HasSuffix(r.URL.String(), "jobs") { - _, _ = w.Write([]byte(testGhWFJobResponse)) + _, _ = w.Write([]byte(jobResponse)) w.WriteHeader(http.StatusOK) } if strings.HasSuffix(r.URL.String(), "runs") { @@ -243,7 +248,7 @@ func TestNewGitHubRunnerScaler_QueueLength_NoRateLeft(t *testing.T) { tRepo := []string{"test"} 
mockGitHubRunnerScaler.metadata.repos = tRepo - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() @@ -267,7 +272,7 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo(t *testing.T) { mockGitHubRunnerScaler.metadata.repos = []string{"test"} mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { t.Fail() @@ -291,7 +296,7 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_ExtraRunnerLabels(t *testi mockGitHubRunnerScaler.metadata.repos = []string{"test"} mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar", "other", "more"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { t.Fail() @@ -315,7 +320,7 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_LessRunnerLabels(t *testin mockGitHubRunnerScaler.metadata.repos = []string{"test"} mockGitHubRunnerScaler.metadata.labels = []string{"foo"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { t.Fail() @@ -325,6 +330,105 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_LessRunnerLabels(t *testin t.Fail() } } +func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithScalerDefaultLabels_WithJobDefaultLabels(t *testing.T) { + var apiStub = apiStubHandler(true, false) + + meta := getGitHubTestMetaData(apiStub.URL) + + mockGitHubRunnerScaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + mockGitHubRunnerScaler.metadata.repos = []string{"test"} + mockGitHubRunnerScaler.metadata.noDefaultLabels = false + mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} + + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) + + if err != nil { + t.Fail() + } + + if queueLen != 1 { + t.Fail() + } +} + +func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithScalerDefaultLabels_WithoutJobDefaultLabels(t *testing.T) { + var apiStub = apiStubHandlerCustomJob(true, false, testGhWFJobResponseOnlyCustomLabels) + + meta := getGitHubTestMetaData(apiStub.URL) + + mockGitHubRunnerScaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + mockGitHubRunnerScaler.metadata.repos = []string{"test"} + mockGitHubRunnerScaler.metadata.noDefaultLabels = false + mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} + + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) + + if err != nil { + t.Fail() + } + + if queueLen != 1 { + t.Fail() + } +} + +func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithoutScalerDefaultLabels_WithJobDefaultLabels(t *testing.T) { + var apiStub = apiStubHandler(true, false) + + meta := getGitHubTestMetaData(apiStub.URL) + + mockGitHubRunnerScaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + mockGitHubRunnerScaler.metadata.repos = []string{"test"} + mockGitHubRunnerScaler.metadata.noDefaultLabels = true + mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} + + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) + 
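+ // noDefaultLabels is true here, so the reserved runner labels are not appended to the match set; the job's "self-hosted" label finds no match and the queued job is not counted, hence the expected queue length of 0 below.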
+ if err != nil { + t.Fail() + } + + if queueLen != 0 { + t.Fail() + } +} + +func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithoutScalerDefaultLabels_WithoutJobDefaultLabels(t *testing.T) { + var apiStub = apiStubHandlerCustomJob(true, false, testGhWFJobResponseOnlyCustomLabels) + + meta := getGitHubTestMetaData(apiStub.URL) + + mockGitHubRunnerScaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + mockGitHubRunnerScaler.metadata.repos = []string{"test"} + mockGitHubRunnerScaler.metadata.noDefaultLabels = true + mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} + + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) + + if err != nil { + t.Fail() + } + + if queueLen != 1 { + t.Fail() + } +} func TestNewGitHubRunnerScaler_404(t *testing.T) { var apiStub = apiStubHandler404() @@ -338,7 +442,7 @@ func TestNewGitHubRunnerScaler_404(t *testing.T) { mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() @@ -360,7 +464,7 @@ func TestNewGitHubRunnerScaler_BadConnection(t *testing.T) { mockGitHubRunnerScaler.metadata.repos = []string{"test"} mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() @@ -383,7 +487,7 @@ func TestNewGitHubRunnerScaler_BadURL(t *testing.T) { mockGitHubRunnerScaler.metadata.repos = []string{"test"} mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() @@ -407,7 +511,7 @@ func TestNewGitHubRunnerScaler_QueueLength_NoRunnerLabels(t *testing.T) { mockGitHubRunnerScaler.metadata.repos = []string{"test"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -434,7 +538,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned(t *testing.T) { mockGitHubRunnerScaler.metadata.runnerScope = ORG mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -462,7 +566,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned_OneBad(t *testing. mockGitHubRunnerScaler.metadata.runnerScope = ORG mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -487,7 +591,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledUserRepos(t *testing. 
mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -511,7 +615,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledUserRepos_Exceeds30En mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) t.Fail() @@ -535,7 +639,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledOrgRepos(t *testing.T mockGitHubRunnerScaler.metadata.runnerScope = ORG mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -560,7 +664,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledEntRepos(t *testing.T mockGitHubRunnerScaler.metadata.runnerScope = ENT mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} - queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err != nil { fmt.Println(err) @@ -584,7 +688,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledBadRepos(t *testing.T mockGitHubRunnerScaler.metadata.runnerScope = "bad" - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() @@ -605,7 +709,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledRepos_NoRate(t *testi httpClient: http.DefaultClient, } - _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + _, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.Background()) if err == nil { t.Fail() diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go index a3ceeaa0405..3023d8ed08a 100644 --- a/pkg/scalers/kubernetes_workload_scaler.go +++ b/pkg/scalers/kubernetes_workload_scaler.go @@ -33,7 +33,7 @@ var phasesCountedAsTerminated = []corev1.PodPhase{ type kubernetesWorkloadMetadata struct { PodSelector string `keda:"name=podSelector, order=triggerMetadata"` - Value float64 `keda:"name=value, order=triggerMetadata"` + Value float64 `keda:"name=value, order=triggerMetadata, default=0"` ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, default=0"` namespace string @@ -72,17 +72,13 @@ func NewKubernetesWorkloadScaler(kubeClient client.Client, config *scalersconfig func parseKubernetesWorkloadMetadata(config *scalersconfig.ScalerConfig) (kubernetesWorkloadMetadata, error) { meta := kubernetesWorkloadMetadata{} - err := config.TypedConfig(&meta) - if err != nil { - return meta, fmt.Errorf("error parsing kubernetes workload metadata: %w", err) - } - meta.namespace = config.ScalableObjectNamespace meta.triggerIndex = config.TriggerIndex meta.asMetricSource = config.AsMetricSource - if meta.asMetricSource { - meta.Value = 0 + err := config.TypedConfig(&meta) + if err != nil { + return meta, fmt.Errorf("error parsing kubernetes workload metadata: %w", err) } selector, err := labels.Parse(meta.PodSelector) @@ -91,10 +87,6 @@ func 
parseKubernetesWorkloadMetadata(config *scalersconfig.ScalerConfig) (kubern } meta.podSelector = selector - if err := meta.Validate(); err != nil { - return meta, err - } - return meta, nil } diff --git a/pkg/scalers/loki_scaler.go b/pkg/scalers/loki_scaler.go index 11a43e5384c..dff08107f02 100644 --- a/pkg/scalers/loki_scaler.go +++ b/pkg/scalers/loki_scaler.go @@ -19,37 +19,27 @@ import ( ) const ( - lokiServerAddress = "serverAddress" - lokiQuery = "query" - lokiThreshold = "threshold" - lokiActivationThreshold = "activationThreshold" - lokiNamespace = "namespace" - tenantName = "tenantName" + defaultIgnoreNullValues = true tenantNameHeaderKey = "X-Scope-OrgID" - lokiIgnoreNullValues = "ignoreNullValues" -) - -var ( - lokiDefaultIgnoreNullValues = true ) type lokiScaler struct { metricType v2.MetricTargetType - metadata *lokiMetadata + metadata lokiMetadata httpClient *http.Client logger logr.Logger } type lokiMetadata struct { - serverAddress string - query string - threshold float64 - activationThreshold float64 - lokiAuth *authentication.AuthMeta - triggerIndex int - tenantName string - ignoreNullValues bool - unsafeSsl bool + ServerAddress string `keda:"name=serverAddress,order=triggerMetadata"` + Query string `keda:"name=query,order=triggerMetadata"` + Threshold float64 `keda:"name=threshold,order=triggerMetadata"` + ActivationThreshold float64 `keda:"name=activationThreshold,order=triggerMetadata,default=0"` + TenantName string `keda:"name=tenantName,order=triggerMetadata,optional"` + IgnoreNullValues bool `keda:"name=ignoreNullValues,order=triggerMetadata,default=true"` + UnsafeSsl bool `keda:"name=unsafeSsl,order=triggerMetadata,default=false"` + TriggerIndex int + Auth *authentication.AuthMeta } type lokiQueryResult struct { @@ -57,113 +47,54 @@ type lokiQueryResult struct { Data struct { ResultType string `json:"resultType"` Result []struct { - Metric struct { - } `json:"metric"` - Value []interface{} `json:"value"` + Metric struct{} `json:"metric"` + Value []interface{} `json:"value"` } `json:"result"` } `json:"data"` } -// NewLokiScaler returns a new lokiScaler func NewLokiScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - logger := InitializeLogger(config, "loki_scaler") - meta, err := parseLokiMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing loki metadata: %w", err) } - httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.unsafeSsl) + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.UnsafeSsl) return &lokiScaler{ metricType: metricType, metadata: meta, httpClient: httpClient, - logger: logger, + logger: InitializeLogger(config, "loki_scaler"), }, nil } -func parseLokiMetadata(config *scalersconfig.ScalerConfig) (meta *lokiMetadata, err error) { - meta = &lokiMetadata{} - - if val, ok := config.TriggerMetadata[lokiServerAddress]; ok && val != "" { - meta.serverAddress = val - } else { - return nil, fmt.Errorf("no %s given", lokiServerAddress) - } - - if val, ok := config.TriggerMetadata[lokiQuery]; ok && val != "" { - meta.query = val - } else { - return nil, fmt.Errorf("no %s given", lokiQuery) - } - - if val, ok := config.TriggerMetadata[lokiThreshold]; ok && val != "" { - t, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %w", lokiThreshold, err) - } - - meta.threshold = t - } else { - if config.AsMetricSource { - 
meta.threshold = 0 - } else { - return nil, fmt.Errorf("no %s given", lokiThreshold) - } - } - - meta.activationThreshold = 0 - if val, ok := config.TriggerMetadata[lokiActivationThreshold]; ok { - t, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, fmt.Errorf("activationThreshold parsing error %w", err) - } - - meta.activationThreshold = t - } - - if val, ok := config.TriggerMetadata[tenantName]; ok && val != "" { - meta.tenantName = val +func parseLokiMetadata(config *scalersconfig.ScalerConfig) (lokiMetadata, error) { + meta := lokiMetadata{} + err := config.TypedConfig(&meta) + if err != nil { + return meta, fmt.Errorf("error parsing loki metadata: %w", err) } - meta.ignoreNullValues = lokiDefaultIgnoreNullValues - if val, ok := config.TriggerMetadata[lokiIgnoreNullValues]; ok && val != "" { - ignoreNullValues, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("err incorrect value for ignoreNullValues given: %s please use true or false", val) - } - meta.ignoreNullValues = ignoreNullValues + if config.AsMetricSource { + meta.Threshold = 0 } - meta.unsafeSsl = false - if val, ok := config.TriggerMetadata[unsafeSsl]; ok && val != "" { - unsafeSslValue, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %w", unsafeSsl, err) - } - - meta.unsafeSsl = unsafeSslValue - } - - meta.triggerIndex = config.TriggerIndex - - // parse auth configs from ScalerConfig auth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams) if err != nil { - return nil, err + return meta, err } - meta.lokiAuth = auth + meta.Auth = auth + meta.TriggerIndex = config.TriggerIndex return meta, nil } -// Close returns a nil error func (s *lokiScaler) Close(context.Context) error { if s.httpClient != nil { s.httpClient.CloseIdleConnections() @@ -171,100 +102,101 @@ func (s *lokiScaler) Close(context.Context) error { return nil } -// GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler func (s *lokiScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, "loki"), + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, "loki"), }, - Target: GetMetricTargetMili(s.metricType, s.metadata.threshold), - } - metricSpec := v2.MetricSpec{ - External: externalMetric, Type: externalMetricType, + Target: GetMetricTargetMili(s.metricType, s.metadata.Threshold), } + metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} } -// ExecuteLokiQuery returns the result of the LogQL query execution func (s *lokiScaler) ExecuteLokiQuery(ctx context.Context) (float64, error) { - u, err := url.ParseRequestURI(s.metadata.serverAddress) + u, err := url.ParseRequestURI(s.metadata.ServerAddress) if err != nil { return -1, err } u.Path = "/loki/api/v1/query" - - u.RawQuery = url.Values{ - "query": []string{s.metadata.query}, - }.Encode() + u.RawQuery = url.Values{"query": []string{s.metadata.Query}}.Encode() req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) if err != nil { return -1, err } - if s.metadata.lokiAuth != nil && s.metadata.lokiAuth.EnableBearerAuth { - req.Header.Add("Authorization", authentication.GetBearerToken(s.metadata.lokiAuth)) - } else if s.metadata.lokiAuth != nil && s.metadata.lokiAuth.EnableBasicAuth { - req.SetBasicAuth(s.metadata.lokiAuth.Username, 
s.metadata.lokiAuth.Password) + if s.metadata.Auth != nil { + if s.metadata.Auth.EnableBearerAuth { + req.Header.Add("Authorization", authentication.GetBearerToken(s.metadata.Auth)) + } else if s.metadata.Auth.EnableBasicAuth { + req.SetBasicAuth(s.metadata.Auth.Username, s.metadata.Auth.Password) + } } - if s.metadata.tenantName != "" { - req.Header.Add(tenantNameHeaderKey, s.metadata.tenantName) + if s.metadata.TenantName != "" { + req.Header.Add(tenantNameHeaderKey, s.metadata.TenantName) } - r, err := s.httpClient.Do(req) + resp, err := s.httpClient.Do(req) if err != nil { return -1, err } - defer r.Body.Close() + defer resp.Body.Close() - b, err := io.ReadAll(r.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return -1, err } - if !(r.StatusCode >= 200 && r.StatusCode <= 299) { - err := fmt.Errorf("loki query api returned error. status: %d response: %s", r.StatusCode, string(b)) - s.logger.Error(err, "loki query api returned error") - return -1, err + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return -1, fmt.Errorf("loki query api returned error. status: %d response: %s", resp.StatusCode, string(body)) } var result lokiQueryResult - err = json.Unmarshal(b, &result) - if err != nil { + if err := json.Unmarshal(body, &result); err != nil { return -1, err } - var v float64 = -1 + return s.parseQueryResult(result) +} - // allow for zero element or single element result sets +func (s *lokiScaler) parseQueryResult(result lokiQueryResult) (float64, error) { if len(result.Data.Result) == 0 { - if s.metadata.ignoreNullValues { + if s.metadata.IgnoreNullValues { return 0, nil } return -1, fmt.Errorf("loki metrics may be lost, the result is empty") - } else if len(result.Data.Result) > 1 { - return -1, fmt.Errorf("loki query %s returned multiple elements", s.metadata.query) } - valueLen := len(result.Data.Result[0].Value) - if valueLen == 0 { - if s.metadata.ignoreNullValues { + if len(result.Data.Result) > 1 { + return -1, fmt.Errorf("loki query %s returned multiple elements", s.metadata.Query) + } + + values := result.Data.Result[0].Value + if len(values) == 0 { + if s.metadata.IgnoreNullValues { return 0, nil } return -1, fmt.Errorf("loki metrics may be lost, the value list is empty") - } else if valueLen < 2 { - return -1, fmt.Errorf("loki query %s didn't return enough values", s.metadata.query) } - val := result.Data.Result[0].Value[1] - if val != nil { - str := val.(string) - v, err = strconv.ParseFloat(str, 64) - if err != nil { - s.logger.Error(err, "Error converting loki value", "loki_value", str) - return -1, err - } + if len(values) < 2 { + return -1, fmt.Errorf("loki query %s didn't return enough values", s.metadata.Query) + } + + if values[1] == nil { + return 0, nil + } + + str, ok := values[1].(string) + if !ok { + return -1, fmt.Errorf("failed to parse loki value as string") + } + + v, err := strconv.ParseFloat(str, 64) + if err != nil { + return -1, fmt.Errorf("error converting loki value %s: %w", str, err) } return v, nil @@ -279,6 +211,5 @@ func (s *lokiScaler) GetMetricsAndActivity(ctx context.Context, metricName strin } metric := GenerateMetricInMili(metricName, val) - - return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.ActivationThreshold, nil } diff --git a/pkg/scalers/loki_scaler_test.go b/pkg/scalers/loki_scaler_test.go index 06f95f46419..e5f8082269d 100644 --- a/pkg/scalers/loki_scaler_test.go +++ b/pkg/scalers/loki_scaler_test.go 
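An editorial aside, not part of the changeset: extracting parseQueryResult from ExecuteLokiQuery above makes the value-handling logic testable without an HTTP stub. Below is a minimal sketch of such a direct test, assuming the lokiScaler, lokiMetadata, and lokiQueryResult types exactly as introduced in this diff; the test name and inline JSON fixture are hypothetical, and this test is not part of the changes that follow.

package scalers

import (
	"encoding/json"
	"testing"

	"github.com/go-logr/logr"
)

// TestParseQueryResultDirect feeds a canned single-vector result straight into
// parseQueryResult and checks the parsed float, with no httptest server involved.
func TestParseQueryResultDirect(t *testing.T) {
	scaler := lokiScaler{
		metadata: lokiMetadata{IgnoreNullValues: false},
		logger:   logr.Discard(),
	}

	var result lokiQueryResult
	body := `{"data":{"resultType":"vector","result":[{"metric":{},"value":[1700000000,"42"]}]}}`
	if err := json.Unmarshal([]byte(body), &result); err != nil {
		t.Fatal(err)
	}

	v, err := scaler.parseQueryResult(result)
	if err != nil {
		t.Fatal(err)
	}
	if v != 42 {
		t.Errorf("expected 42, got %f", v)
	}
}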
@@ -38,7 +38,7 @@ var testLokiMetadata = []parseLokiMetadataTestData{ {map[string]string{"serverAddress": "http://localhost:3100", "threshold": "1", "query": ""}, true}, // ignoreNullValues with wrong value {map[string]string{"serverAddress": "http://localhost:3100", "threshold": "1", "query": "sum(rate({filename=\"/var/log/syslog\"}[1m])) by (level)", "ignoreNullValues": "xxxx"}, true}, - + // with unsafeSsl {map[string]string{"serverAddress": "https://localhost:3100", "threshold": "1", "query": "sum(rate({filename=\"/var/log/syslog\"}[1m])) by (level)", "unsafeSsl": "true"}, false}, } @@ -83,14 +83,14 @@ func TestLokiScalerAuthParams(t *testing.T) { } if err == nil { - if meta.lokiAuth.EnableBasicAuth && !strings.Contains(testData.metadata["authModes"], "basic") { + if meta.Auth.EnableBasicAuth && !strings.Contains(testData.metadata["authModes"], "basic") { t.Error("wrong auth mode detected") } } } } -type lokiQromQueryResultTestData struct { +type lokiQueryResultTestData struct { name string bodyStr string responseStatus int @@ -100,7 +100,7 @@ type lokiQromQueryResultTestData struct { unsafeSsl bool } -var testLokiQueryResult = []lokiQromQueryResultTestData{ +var testLokiQueryResult = []lokiQueryResultTestData{ { name: "no results", bodyStr: `{}`, @@ -189,17 +189,16 @@ func TestLokiScalerExecuteLogQLQuery(t *testing.T) { t.Run(testData.name, func(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, _ *http.Request) { writer.WriteHeader(testData.responseStatus) - if _, err := writer.Write([]byte(testData.bodyStr)); err != nil { t.Fatal(err) } })) scaler := lokiScaler{ - metadata: &lokiMetadata{ - serverAddress: server.URL, - ignoreNullValues: testData.ignoreNullValues, - unsafeSsl: testData.unsafeSsl, + metadata: lokiMetadata{ + ServerAddress: server.URL, + IgnoreNullValues: testData.ignoreNullValues, + UnsafeSsl: testData.unsafeSsl, }, httpClient: http.DefaultClient, logger: logr.Discard(), @@ -208,7 +207,6 @@ func TestLokiScalerExecuteLogQLQuery(t *testing.T) { value, err := scaler.ExecuteLokiQuery(context.TODO()) assert.Equal(t, testData.expectedValue, value) - if testData.isError { assert.Error(t, err) } else { @@ -219,7 +217,7 @@ func TestLokiScalerExecuteLogQLQuery(t *testing.T) { } func TestLokiScalerTenantHeader(t *testing.T) { - testData := lokiQromQueryResultTestData{ + testData := lokiQueryResultTestData{ name: "no values", bodyStr: `{"data":{"result":[]}}`, responseStatus: http.StatusOK, @@ -238,15 +236,14 @@ func TestLokiScalerTenantHeader(t *testing.T) { })) scaler := lokiScaler{ - metadata: &lokiMetadata{ - serverAddress: server.URL, - tenantName: tenantName, - ignoreNullValues: testData.ignoreNullValues, + metadata: lokiMetadata{ + ServerAddress: server.URL, + TenantName: tenantName, + IgnoreNullValues: testData.ignoreNullValues, }, httpClient: http.DefaultClient, } _, err := scaler.ExecuteLokiQuery(context.TODO()) - assert.NoError(t, err) } diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go index f30b8fb97ec..25d2a62ede9 100644 --- a/pkg/scalers/mongo_scaler.go +++ b/pkg/scalers/mongo_scaler.go @@ -6,8 +6,6 @@ import ( "fmt" "net" "net/url" - "strconv" - "strings" "time" "github.com/go-logr/logr" @@ -22,60 +20,45 @@ import ( kedautil "github.com/kedacore/keda/v2/pkg/util" ) -// mongoDBScaler is support for mongoDB in keda. 
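The mongoDB hunk below repeats the pattern used throughout this change: hand-rolled TriggerMetadata/AuthParams lookups are replaced by `keda:` struct tags resolved through `config.TypedConfig`. As a rough sketch of what tag-driven parsing of this kind involves (a toy reimplementation for illustration only, not the actual scalersconfig parser, which also handles defaults, `resolvedEnv` indirection, and non-string types):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// populate fills string fields of dst from the first source map (by the
// tag's order list) that contains the tag's name.
func populate(dst interface{}, sources map[string]map[string]string) error {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("keda")
		if tag == "" || v.Field(i).Kind() != reflect.String {
			continue
		}
		var name string
		var order []string
		optional := false
		for _, part := range strings.Split(tag, ",") {
			switch {
			case strings.HasPrefix(part, "name="):
				name = strings.TrimPrefix(part, "name=")
			case strings.HasPrefix(part, "order="):
				order = strings.Split(strings.TrimPrefix(part, "order="), ";")
			case part == "optional":
				optional = true
			}
		}
		found := false
		for _, src := range order {
			if val, ok := sources[src][name]; ok {
				v.Field(i).SetString(val)
				found = true
				break
			}
		}
		if !found && !optional {
			return fmt.Errorf("missing required parameter %q", name)
		}
	}
	return nil
}

type demoMetadata struct {
	DBName   string `keda:"name=dbName,order=authParams;triggerMetadata"`
	Username string `keda:"name=username,order=authParams;triggerMetadata,optional"`
}

func main() {
	meta := demoMetadata{}
	err := populate(&meta, map[string]map[string]string{
		"triggerMetadata": {"dbName": "orders"},
		"authParams":      {"username": "app"},
	})
	fmt.Println(meta, err) // {orders app} <nil>
}
```

Each tag names the metadata key and the lookup order across the parameter sources; `optional` suppresses the missing-parameter error.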
type mongoDBScaler struct { metricType v2.MetricTargetType - metadata *mongoDBMetadata + metadata mongoDBMetadata client *mongo.Client logger logr.Logger } -// mongoDBMetadata specify mongoDB scaler params. type mongoDBMetadata struct { - // The string is used by connected with mongoDB. - // +optional - connectionString string - // Specify the prefix to connect to the mongoDB server, default value `mongodb`, if the connectionString be provided, don't need to specify this param. - // +optional - scheme string - // Specify the host to connect to the mongoDB server,if the connectionString be provided, don't need to specify this param. - // +optional - host string - // Specify the port to connect to the mongoDB server,if the connectionString be provided, don't need to specify this param. - // +optional - port string - // Specify the username to connect to the mongoDB server,if the connectionString be provided, don't need to specify this param. - // +optional - username string - // Specify the password to connect to the mongoDB server,if the connectionString be provided, don't need to specify this param. - // +optional - password string - - // The name of the database to be queried. - // +required - dbName string - // The name of the collection to be queried. - // +required - collection string - // A mongoDB filter doc,used by specify DB. - // +required - query string - // A threshold that is used as targetAverageValue in HPA - // +required - queryValue int64 - // A threshold that is used to check if scaler is active - // +optional - activationQueryValue int64 - - // The index of the scaler inside the ScaledObject - // +internal - triggerIndex int + ConnectionString string `keda:"name=connectionString,order=authParams;triggerMetadata;resolvedEnv,optional"` + Scheme string `keda:"name=scheme,order=authParams;triggerMetadata,default=mongodb,optional"` + Host string `keda:"name=host,order=authParams;triggerMetadata,optional"` + Port string `keda:"name=port,order=authParams;triggerMetadata,optional"` + Username string `keda:"name=username,order=authParams;triggerMetadata,optional"` + Password string `keda:"name=password,order=authParams;triggerMetadata;resolvedEnv,optional"` + DBName string `keda:"name=dbName,order=authParams;triggerMetadata"` + Collection string `keda:"name=collection,order=triggerMetadata"` + Query string `keda:"name=query,order=triggerMetadata"` + QueryValue int64 `keda:"name=queryValue,order=triggerMetadata"` + ActivationQueryValue int64 `keda:"name=activationQueryValue,order=triggerMetadata,default=0"` + TriggerIndex int } -// Default variables and settings -const ( - mongoDBDefaultTimeOut = 10 * time.Second -) +func (m *mongoDBMetadata) Validate() error { + if m.ConnectionString == "" { + if m.Host == "" { + return fmt.Errorf("no host given") + } + if m.Port == "" && m.Scheme != "mongodb+srv" { + return fmt.Errorf("no port given") + } + if m.Username == "" { + return fmt.Errorf("no username given") + } + if m.Password == "" { + return fmt.Errorf("no password given") + } + } + return nil +} // NewMongoDBScaler creates a new mongoDB scaler func NewMongoDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { @@ -84,22 +67,14 @@ func NewMongoDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) ( return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - ctx, cancel := context.WithTimeout(ctx, mongoDBDefaultTimeOut) - defer cancel() - - meta, connStr, err := parseMongoDBMetadata(config) + meta, err := 
parseMongoDBMetadata(config) if err != nil { - return nil, fmt.Errorf("failed to parsing mongoDB metadata, because of %w", err) + return nil, fmt.Errorf("error parsing mongodb metadata: %w", err) } - opt := options.Client().ApplyURI(connStr) - client, err := mongo.Connect(ctx, opt) + client, err := createMongoDBClient(ctx, meta) if err != nil { - return nil, fmt.Errorf("failed to establish connection with mongoDB, because of %w", err) - } - - if err = client.Ping(ctx, readpref.Primary()); err != nil { - return nil, fmt.Errorf("failed to ping mongoDB, because of %w", err) + return nil, fmt.Errorf("error creating mongodb client: %w", err) } return &mongoDBScaler{ @@ -110,171 +85,101 @@ func NewMongoDBScaler(ctx context.Context, config *scalersconfig.ScalerConfig) ( }, nil } -func parseMongoDBMetadata(config *scalersconfig.ScalerConfig) (*mongoDBMetadata, string, error) { - var connStr string - var err error - // setting default metadata +func parseMongoDBMetadata(config *scalersconfig.ScalerConfig) (mongoDBMetadata, error) { meta := mongoDBMetadata{} - - // parse metaData from ScaledJob config - if val, ok := config.TriggerMetadata["collection"]; ok { - meta.collection = val - } else { - return nil, "", fmt.Errorf("no collection given") + err := config.TypedConfig(&meta) + if err != nil { + return meta, fmt.Errorf("error parsing mongodb metadata: %w", err) } - if val, ok := config.TriggerMetadata["query"]; ok { - meta.query = val - } else { - return nil, "", fmt.Errorf("no query given") - } + meta.TriggerIndex = config.TriggerIndex + return meta, nil +} - if val, ok := config.TriggerMetadata["queryValue"]; ok { - queryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, "", fmt.Errorf("failed to convert %v to int, because of %w", val, err) - } - meta.queryValue = queryValue +func createMongoDBClient(ctx context.Context, meta mongoDBMetadata) (*mongo.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + var connString string + if meta.ConnectionString != "" { + connString = meta.ConnectionString } else { - if config.AsMetricSource { - meta.queryValue = 0 - } else { - return nil, "", fmt.Errorf("no queryValue given") + host := meta.Host + if meta.Scheme != "mongodb+srv" { + host = net.JoinHostPort(meta.Host, meta.Port) } - } - - meta.activationQueryValue = 0 - if val, ok := config.TriggerMetadata["activationQueryValue"]; ok { - activationQueryValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, "", fmt.Errorf("failed to convert %v to int, because of %w", val, err) + u := &url.URL{ + Scheme: meta.Scheme, + User: url.UserPassword(meta.Username, meta.Password), + Host: host, + Path: meta.DBName, } - meta.activationQueryValue = activationQueryValue + connString = u.String() } - dbName, err := GetFromAuthOrMeta(config, "dbName") + client, err := mongo.Connect(ctx, options.Client().ApplyURI(connString)) if err != nil { - return nil, "", err + return nil, fmt.Errorf("failed to create mongodb client: %w", err) } - meta.dbName = dbName - - // Resolve connectionString - switch { - case config.AuthParams["connectionString"] != "": - meta.connectionString = config.AuthParams["connectionString"] - case config.TriggerMetadata["connectionStringFromEnv"] != "": - meta.connectionString = config.ResolvedEnv[config.TriggerMetadata["connectionStringFromEnv"]] - default: - meta.connectionString = "" - scheme, err := GetFromAuthOrMeta(config, "scheme") - if err != nil { - meta.scheme = "mongodb" - } else { - meta.scheme = scheme 
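Building the connection string through `net/url`, as `createMongoDBClient` above now does, gets credential escaping for free, where the old `fmt.Sprintf` path had to call `url.QueryEscape` by hand. A self-contained sketch of the same construction, with made-up host and credential values:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	scheme, host, port := "mongodb", "mongo.example.com", "27017"
	user, pass, db := "scaler", "p@ss/word", "orders"

	addr := host
	if scheme != "mongodb+srv" { // srv connection strings must not carry a port
		addr = net.JoinHostPort(host, port)
	}
	u := &url.URL{
		Scheme: scheme,
		User:   url.UserPassword(user, pass), // percent-encodes reserved characters
		Host:   addr,
		Path:   db,
	}
	fmt.Println(u.String())
	// mongodb://scaler:p%40ss%2Fword@mongo.example.com:27017/orders
}
```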
- } - - host, err := GetFromAuthOrMeta(config, "host") - if err != nil { - return nil, "", err - } - meta.host = host - - if !strings.Contains(scheme, "mongodb+srv") { - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, "", err - } - meta.port = port - } - username, err := GetFromAuthOrMeta(config, "username") - if err != nil { - return nil, "", err - } - meta.username = username - - if config.AuthParams["password"] != "" { - meta.password = config.AuthParams["password"] - } else if config.TriggerMetadata["passwordFromEnv"] != "" { - meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - } - if len(meta.password) == 0 { - return nil, "", fmt.Errorf("no password given") - } - } - - switch { - case meta.connectionString != "": - connStr = meta.connectionString - case meta.scheme == "mongodb+srv": - // nosemgrep: db-connection-string - connStr = fmt.Sprintf("%s://%s:%s@%s/%s", meta.scheme, url.QueryEscape(meta.username), url.QueryEscape(meta.password), meta.host, meta.dbName) - default: - addr := net.JoinHostPort(meta.host, meta.port) - // nosemgrep: db-connection-string - connStr = fmt.Sprintf("%s://%s:%s@%s/%s", meta.scheme, url.QueryEscape(meta.username), url.QueryEscape(meta.password), addr, meta.dbName) + err = client.Ping(ctx, readpref.Primary()) + if err != nil { + return nil, fmt.Errorf("failed to ping mongodb: %w", err) } - meta.triggerIndex = config.TriggerIndex - return &meta, connStr, nil + return client, nil } -// Close disposes of mongoDB connections func (s *mongoDBScaler) Close(ctx context.Context) error { if s.client != nil { err := s.client.Disconnect(ctx) if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to close mongoDB connection, because of %v", err)) + s.logger.Error(err, "Error closing mongodb connection") return err } } - return nil } -// getQueryResult query mongoDB by meta.query func (s *mongoDBScaler) getQueryResult(ctx context.Context) (int64, error) { - ctx, cancel := context.WithTimeout(ctx, mongoDBDefaultTimeOut) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - filter, err := json2BsonDoc(s.metadata.query) + collection := s.client.Database(s.metadata.DBName).Collection(s.metadata.Collection) + + filter, err := json2BsonDoc(s.metadata.Query) if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to convert query param to bson.Doc, because of %v", err)) - return 0, err + return 0, fmt.Errorf("failed to parse query: %w", err) } - docsNum, err := s.client.Database(s.metadata.dbName).Collection(s.metadata.collection).CountDocuments(ctx, filter) + count, err := collection.CountDocuments(ctx, filter) if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to query %v in %v, because of %v", s.metadata.dbName, s.metadata.collection, err)) - return 0, err + return 0, fmt.Errorf("failed to execute query: %w", err) } - return docsNum, nil + return count, nil } -// GetMetricsAndActivity query from mongoDB,and return to external metrics func (s *mongoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect momgoDB, because of %w", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect mongodb: %w", err) } metric := GenerateMetricInMili(metricName, float64(num)) - return []external_metrics.ExternalMetricValue{metric}, num > 
s.metadata.activationQueryValue, nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.ActivationQueryValue, nil } -// GetMetricSpecForScaling get the query value for scaling func (s *mongoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + metricName := kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", s.metadata.Collection)) externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", s.metadata.collection))), + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, metricName), }, - Target: GetMetricTarget(s.metricType, s.metadata.queryValue), - } - metricSpec := v2.MetricSpec{ - External: externalMetric, Type: externalMetricType, + Target: GetMetricTarget(s.metricType, s.metadata.QueryValue), } + metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} } diff --git a/pkg/scalers/mongo_scaler_test.go b/pkg/scalers/mongo_scaler_test.go index fd9f54f8337..c749b9f7ae4 100644 --- a/pkg/scalers/mongo_scaler_test.go +++ b/pkg/scalers/mongo_scaler_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/go-logr/logr" - "github.com/stretchr/testify/assert" "go.mongodb.org/mongo-driver/mongo" + v2 "k8s.io/api/autoscaling/v2" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) @@ -100,7 +100,7 @@ var mongoDBMetricIdentifiers = []mongoDBMetricIdentifier{ func TestParseMongoDBMetadata(t *testing.T) { for _, testData := range testMONGODBMetadata { - _, _, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + _, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) if err != nil && !testData.raisesError { t.Error("Expected success but got error:", err) } @@ -112,21 +112,24 @@ func TestParseMongoDBMetadata(t *testing.T) { func TestParseMongoDBConnectionString(t *testing.T) { for _, testData := range mongoDBConnectionStringTestDatas { - _, connStr, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams}) + _, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ + ResolvedEnv: testData.metadataTestData.resolvedEnv, + TriggerMetadata: testData.metadataTestData.metadata, + AuthParams: testData.metadataTestData.authParams, + }) if err != nil { t.Error("Expected success but got error:", err) } - assert.Equal(t, testData.connectionString, connStr) } } func TestMongoDBGetMetricSpecForScaling(t *testing.T) { for _, testData := range mongoDBMetricIdentifiers { - meta, _, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex}) + meta, err := parseMongoDBMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, TriggerMetadata: testData.metadataTestData.metadata, TriggerIndex: testData.triggerIndex}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockMongoDBScaler := mongoDBScaler{"", meta, &mongo.Client{}, logr.Discard()} + 
mockMongoDBScaler := mongoDBScaler{metricType: v2.AverageValueMetricType, metadata: meta, client: &mongo.Client{}, logger: logr.Discard()} metricSpec := mockMongoDBScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go index f3133cc14ad..6ea0203b898 100644 --- a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -5,7 +5,6 @@ import ( "database/sql" "fmt" "regexp" - "strconv" "strings" "time" @@ -42,12 +41,46 @@ type postgreSQLScaler struct { } type postgreSQLMetadata struct { - targetQueryValue float64 - activationTargetQueryValue float64 - connection string - query string + TargetQueryValue float64 `keda:"name=targetQueryValue, order=triggerMetadata, optional"` + ActivationTargetQueryValue float64 `keda:"name=activationTargetQueryValue, order=triggerMetadata, optional"` + Connection string `keda:"name=connection, order=authParams;resolvedEnv, optional"` + Query string `keda:"name=query, order=triggerMetadata"` triggerIndex int azureAuthContext azureAuthContext + + Host string `keda:"name=host, order=authParams;triggerMetadata, optional"` + Port string `keda:"name=port, order=authParams;triggerMetadata, optional"` + UserName string `keda:"name=userName, order=authParams;triggerMetadata, optional"` + DBName string `keda:"name=dbName, order=authParams;triggerMetadata, optional"` + SslMode string `keda:"name=sslmode, order=authParams;triggerMetadata, optional"` + + Password string `keda:"name=password, order=authParams;resolvedEnv, optional"` +} + +func (p *postgreSQLMetadata) Validate() error { + if p.Connection == "" { + if p.Host == "" { + return fmt.Errorf("no host given") + } + + if p.Port == "" { + return fmt.Errorf("no port given") + } + + if p.UserName == "" { + return fmt.Errorf("no userName given") + } + + if p.DBName == "" { + return fmt.Errorf("no dbName given") + } + + if p.SslMode == "" { + return fmt.Errorf("no sslmode given") + } + } + + return nil } type azureAuthContext struct { @@ -83,66 +116,26 @@ func NewPostgreSQLScaler(ctx context.Context, config *scalersconfig.ScalerConfig } func parsePostgreSQLMetadata(logger logr.Logger, config *scalersconfig.ScalerConfig) (*postgreSQLMetadata, kedav1alpha1.AuthPodIdentity, error) { - meta := postgreSQLMetadata{} - + meta := &postgreSQLMetadata{} authPodIdentity := kedav1alpha1.AuthPodIdentity{} - - if val, ok := config.TriggerMetadata["query"]; ok { - meta.query = val - } else { - return nil, authPodIdentity, fmt.Errorf("no query given") - } - - if val, ok := config.TriggerMetadata["targetQueryValue"]; ok { - targetQueryValue, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, authPodIdentity, fmt.Errorf("queryValue parsing error %w", err) - } - meta.targetQueryValue = targetQueryValue - } else { - if config.AsMetricSource { - meta.targetQueryValue = 0 - } else { - return nil, authPodIdentity, fmt.Errorf("no targetQueryValue given") - } + meta.triggerIndex = config.TriggerIndex + if err := config.TypedConfig(meta); err != nil { + return nil, authPodIdentity, fmt.Errorf("error parsing postgresql metadata: %w", err) } - meta.activationTargetQueryValue = 0 - if val, ok := config.TriggerMetadata["activationTargetQueryValue"]; ok { - activationTargetQueryValue, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, authPodIdentity, fmt.Errorf("activationTargetQueryValue parsing error %w", err) - } - meta.activationTargetQueryValue = activationTargetQueryValue + 
if !config.AsMetricSource && meta.TargetQueryValue == 0 { + return nil, authPodIdentity, fmt.Errorf("no targetQueryValue given") } switch config.PodIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: - switch { - case config.AuthParams["connection"] != "": - meta.connection = config.AuthParams["connection"] - case config.TriggerMetadata["connectionFromEnv"] != "": - meta.connection = config.ResolvedEnv[config.TriggerMetadata["connectionFromEnv"]] - default: - params, err := buildConnArray(config) - if err != nil { - return nil, authPodIdentity, fmt.Errorf("failed to parse fields related to the connection") - } - - var password string - if config.AuthParams["password"] != "" { - password = config.AuthParams["password"] - } else if config.TriggerMetadata["passwordFromEnv"] != "" { - password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - } - params = append(params, "password="+escapePostgreConnectionParameter(password)) - meta.connection = strings.Join(params, " ") + if meta.Connection == "" { + params := buildConnArray(meta) + params = append(params, "password="+escapePostgreConnectionParameter(meta.Password)) + meta.Connection = strings.Join(params, " ") } case kedav1alpha1.PodIdentityProviderAzureWorkload: - params, err := buildConnArray(config) - if err != nil { - return nil, authPodIdentity, fmt.Errorf("failed to parse fields related to the connection") - } + params := buildConnArray(meta) cred, err := azure.NewChainedCredential(logger, config.PodIdentity) if err != nil { @@ -152,51 +145,26 @@ func parsePostgreSQLMetadata(logger logr.Logger, config *scalersconfig.ScalerCon authPodIdentity = kedav1alpha1.AuthPodIdentity{Provider: config.PodIdentity.Provider} params = append(params, "%PASSWORD%") - meta.connection = strings.Join(params, " ") + meta.Connection = strings.Join(params, " ") } meta.triggerIndex = config.TriggerIndex - return &meta, authPodIdentity, nil + return meta, authPodIdentity, nil } -func buildConnArray(config *scalersconfig.ScalerConfig) ([]string, error) { +func buildConnArray(meta *postgreSQLMetadata) []string { var params []string + params = append(params, "host="+escapePostgreConnectionParameter(meta.Host)) + params = append(params, "port="+escapePostgreConnectionParameter(meta.Port)) + params = append(params, "user="+escapePostgreConnectionParameter(meta.UserName)) + params = append(params, "dbname="+escapePostgreConnectionParameter(meta.DBName)) + params = append(params, "sslmode="+escapePostgreConnectionParameter(meta.SslMode)) - host, err := GetFromAuthOrMeta(config, "host") - if err != nil { - return nil, err - } - - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, err - } - - userName, err := GetFromAuthOrMeta(config, "userName") - if err != nil { - return nil, err - } - - dbName, err := GetFromAuthOrMeta(config, "dbName") - if err != nil { - return nil, err - } - - sslmode, err := GetFromAuthOrMeta(config, "sslmode") - if err != nil { - return nil, err - } - params = append(params, "host="+escapePostgreConnectionParameter(host)) - params = append(params, "port="+escapePostgreConnectionParameter(port)) - params = append(params, "user="+escapePostgreConnectionParameter(userName)) - params = append(params, "dbname="+escapePostgreConnectionParameter(dbName)) - params = append(params, "sslmode="+escapePostgreConnectionParameter(sslmode)) - - return params, nil + return params } func getConnection(ctx context.Context, meta *postgreSQLMetadata, podIdentity kedav1alpha1.AuthPodIdentity, logger 
logr.Logger) (*sql.DB, error) { - connectionString := meta.connection + connectionString := meta.Connection if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzureWorkload { accessToken, err := getAzureAccessToken(ctx, meta, azureDatabasePostgresResource) @@ -204,7 +172,7 @@ func getConnection(ctx context.Context, meta *postgreSQLMetadata, podIdentity ke return nil, err } newPasswordField := "password=" + escapePostgreConnectionParameter(accessToken) - connectionString = passwordConnPattern.ReplaceAllString(meta.connection, newPasswordField) + connectionString = passwordConnPattern.ReplaceAllString(meta.Connection, newPasswordField) } db, err := sql.Open("pgx", connectionString) @@ -245,7 +213,7 @@ func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error) } } - err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&id) + err := s.connection.QueryRowContext(ctx, s.metadata.Query).Scan(&id) if err != nil { s.logger.Error(err, fmt.Sprintf("could not query postgreSQL: %s", err)) return 0, fmt.Errorf("could not query postgreSQL: %w", err) @@ -259,7 +227,7 @@ func (s *postgreSQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString("postgresql")), }, - Target: GetMetricTargetMili(s.metricType, s.metadata.targetQueryValue), + Target: GetMetricTargetMili(s.metricType, s.metadata.TargetQueryValue), } metricSpec := v2.MetricSpec{ External: externalMetric, Type: externalMetricType, @@ -276,7 +244,7 @@ func (s *postgreSQLScaler) GetMetricsAndActivity(ctx context.Context, metricName metric := GenerateMetricInMili(metricName, num) - return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetQueryValue, nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.ActivationTargetQueryValue, nil } func escapePostgreConnectionParameter(str string) string { diff --git a/pkg/scalers/postgresql_scaler_test.go b/pkg/scalers/postgresql_scaler_test.go index 3f79d3a4319..da82ca6e3d4 100644 --- a/pkg/scalers/postgresql_scaler_test.go +++ b/pkg/scalers/postgresql_scaler_test.go @@ -85,8 +85,8 @@ func TestPosgresSQLConnectionStringGeneration(t *testing.T) { t.Fatal("Could not parse metadata:", err) } - if meta.connection != testData.connectionString { - t.Errorf("Error generating connectionString, expected '%s' and get '%s'", testData.connectionString, meta.connection) + if meta.Connection != testData.connectionString { + t.Errorf("Error generating connectionString, expected '%s' and get '%s'", testData.connectionString, meta.Connection) } } } @@ -104,8 +104,8 @@ func TestPodIdentityAzureWorkloadPosgresSQLConnectionStringGeneration(t *testing t.Fatal("Could not parse metadata:", err) } - if meta.connection != testData.connectionString { - t.Errorf("Error generating connectionString, expected '%s' and get '%s'", testData.connectionString, meta.connection) + if meta.Connection != testData.connectionString { + t.Errorf("Error generating connectionString, expected '%s' and get '%s'", testData.connectionString, meta.Connection) } } } diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go index 78e2e5b446c..fe80fda1fca 100644 --- a/pkg/scalers/predictkube_scaler.go +++ b/pkg/scalers/predictkube_scaler.go @@ -83,18 +83,51 @@ type PredictKubeScaler struct { } type predictKubeMetadata struct { - predictHorizon time.Duration - historyTimeWindow time.Duration - stepDuration time.Duration - 
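Back in the postgreSQL hunk, `buildConnArray` now assembles the libpq keyword/value connection string straight from the parsed metadata. A standalone sketch of that assembly; the `quote` helper below is an assumed stand-in for `escapePostgreConnectionParameter`, whose exact escaping rules are not shown in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// quote escapes a value for libpq's keyword/value connection-string format:
// wrap in single quotes, backslash-escape embedded quotes and backslashes.
// (Assumed stand-in for the scaler's escapePostgreConnectionParameter helper.)
func quote(v string) string {
	v = strings.ReplaceAll(v, `\`, `\\`)
	v = strings.ReplaceAll(v, `'`, `\'`)
	return "'" + v + "'"
}

func main() {
	meta := map[string]string{
		"host":     "db.example.com",
		"port":     "5432",
		"user":     "app",
		"dbname":   "metrics",
		"sslmode":  "require",
		"password": "p'ss word",
	}
	var params []string
	for _, k := range []string{"host", "port", "user", "dbname", "sslmode", "password"} {
		params = append(params, k+"="+quote(meta[k]))
	}
	fmt.Println(strings.Join(params, " "))
	// host='db.example.com' port='5432' user='app' dbname='metrics' sslmode='require' password='p\'ss word'
}
```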
apiKey string - prometheusAddress string - prometheusAuth *authentication.AuthMeta - query string - threshold float64 - activationThreshold float64 - triggerIndex int + PrometheusAddress string `keda:"name=prometheusAddress, order=triggerMetadata"` + PrometheusAuth *authentication.Config `keda:"optional"` + Query string `keda:"name=query, order=triggerMetadata"` + PredictHorizon string `keda:"name=predictHorizon, order=triggerMetadata"` + QueryStep string `keda:"name=queryStep, order=triggerMetadata"` + HistoryTimeWindow string `keda:"name=historyTimeWindow, order=triggerMetadata"` + APIKey string `keda:"name=apiKey, order=authParams"` + Threshold float64 `keda:"name=threshold, order=triggerMetadata, optional"` + ActivationThreshold float64 `keda:"name=activationThreshold, order=triggerMetadata, optional"` + + predictHorizon time.Duration + historyTimeWindow time.Duration + stepDuration time.Duration + triggerIndex int } +func (p *predictKubeMetadata) Validate() error { + validate := validator.New() + err := validate.Var(p.PrometheusAddress, "url") + if err != nil { + return fmt.Errorf("invalid prometheusAddress") + } + + p.predictHorizon, err = str2duration.ParseDuration(p.PredictHorizon) + if err != nil { + return fmt.Errorf("predictHorizon parsing error %w", err) + } + + p.stepDuration, err = str2duration.ParseDuration(p.QueryStep) + if err != nil { + return fmt.Errorf("queryStep parsing error %w", err) + } + + p.historyTimeWindow, err = str2duration.ParseDuration(p.HistoryTimeWindow) + if err != nil { + return fmt.Errorf("historyTimeWindow parsing error %w", err) + } + + err = validate.Var(p.APIKey, "jwt") + if err != nil { + return fmt.Errorf("invalid apiKey") + } + + return nil +} func (s *PredictKubeScaler) setupClientConn() error { clientOpt, err := pc.SetGrpcClientOptions(grpcConf, &libs.Base{ @@ -108,7 +141,7 @@ func (s *PredictKubeScaler) setupClientConn() error { Enabled: false, }, }, - pc.InjectPublicClientMetadataInterceptor(s.metadata.apiKey), + pc.InjectPublicClientMetadataInterceptor(s.metadata.APIKey), ) if !grpcConf.Conn.Insecure { @@ -186,7 +219,7 @@ func (s *PredictKubeScaler) GetMetricSpecForScaling(context.Context) []v2.Metric Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName), }, - Target: GetMetricTargetMili(s.metricType, s.metadata.threshold), + Target: GetMetricTargetMili(s.metricType, s.metadata.Threshold), } metricSpec := v2.MetricSpec{ @@ -211,7 +244,7 @@ func (s *PredictKubeScaler) GetMetricsAndActivity(ctx context.Context, metricNam metric := GenerateMetricInMili(metricName, value) - return []external_metrics.ExternalMetricValue{metric}, activationValue > s.metadata.activationThreshold, nil + return []external_metrics.ExternalMetricValue{metric}, activationValue > s.metadata.ActivationThreshold, nil } func (s *PredictKubeScaler) doPredictRequest(ctx context.Context) (float64, float64, error) { @@ -257,7 +290,7 @@ func (s *PredictKubeScaler) doQuery(ctx context.Context) ([]*commonproto.Item, e Step: s.metadata.stepDuration, } - val, warns, err := s.api.QueryRange(ctx, s.metadata.query, r) + val, warns, err := s.api.QueryRange(ctx, s.metadata.Query, r) if len(warns) > 0 { s.logger.V(1).Info("warnings", warns) @@ -345,103 +378,17 @@ func (s *PredictKubeScaler) parsePrometheusResult(result model.Value) (out []*co } func parsePredictKubeMetadata(config *scalersconfig.ScalerConfig) (result *predictKubeMetadata, err error) { - validate := validator.New() - meta := predictKubeMetadata{} - - if val, ok := 
config.TriggerMetadata["query"]; ok { - if len(val) == 0 { - return nil, fmt.Errorf("no query given") - } - - meta.query = val - } else { - return nil, fmt.Errorf("no query given") - } - - if val, ok := config.TriggerMetadata["prometheusAddress"]; ok { - err = validate.Var(val, "url") - if err != nil { - return nil, fmt.Errorf("invalid prometheusAddress") - } - - meta.prometheusAddress = val - } else { - return nil, fmt.Errorf("no prometheusAddress given") - } - - if val, ok := config.TriggerMetadata["predictHorizon"]; ok { - predictHorizon, err := str2duration.ParseDuration(val) - if err != nil { - return nil, fmt.Errorf("predictHorizon parsing error %w", err) - } - meta.predictHorizon = predictHorizon - } else { - return nil, fmt.Errorf("no predictHorizon given") - } - - if val, ok := config.TriggerMetadata["queryStep"]; ok { - stepDuration, err := str2duration.ParseDuration(val) - if err != nil { - return nil, fmt.Errorf("queryStep parsing error %w", err) - } - meta.stepDuration = stepDuration - } else { - return nil, fmt.Errorf("no queryStep given") - } - - if val, ok := config.TriggerMetadata["historyTimeWindow"]; ok { - historyTimeWindow, err := str2duration.ParseDuration(val) - if err != nil { - return nil, fmt.Errorf("historyTimeWindow parsing error %w", err) - } - meta.historyTimeWindow = historyTimeWindow - } else { - return nil, fmt.Errorf("no historyTimeWindow given") - } - - if val, ok := config.TriggerMetadata["threshold"]; ok { - threshold, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, fmt.Errorf("threshold parsing error %w", err) - } - meta.threshold = threshold - } else { - if config.AsMetricSource { - meta.threshold = 0 - } else { - return nil, fmt.Errorf("no threshold given") - } + meta := &predictKubeMetadata{} + if err := config.TypedConfig(meta); err != nil { + return nil, fmt.Errorf("error parsing predictkube metadata: %w", err) } - meta.activationThreshold = 0 - if val, ok := config.TriggerMetadata["activationThreshold"]; ok { - activationThreshold, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, fmt.Errorf("activationThreshold parsing error %w", err) - } - meta.activationThreshold = activationThreshold + if !config.AsMetricSource && meta.Threshold == 0 { + return nil, fmt.Errorf("no threshold given") } meta.triggerIndex = config.TriggerIndex - - if val, ok := config.AuthParams["apiKey"]; ok { - err = validate.Var(val, "jwt") - if err != nil { - return nil, fmt.Errorf("invalid apiKey") - } - - meta.apiKey = val - } else { - return nil, fmt.Errorf("no api key given") - } - - // parse auth configs from ScalerConfig - auth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams) - if err != nil { - return nil, err - } - meta.prometheusAuth = auth - return &meta, nil + return meta, nil } func (s *PredictKubeScaler) ping(ctx context.Context) (err error) { @@ -454,14 +401,14 @@ func (s *PredictKubeScaler) initPredictKubePrometheusConn(ctx context.Context) ( // create http.RoundTripper with auth settings from ScalerConfig roundTripper, err := authentication.CreateHTTPRoundTripper( authentication.FastHTTP, - s.metadata.prometheusAuth, + s.metadata.PrometheusAuth.ToAuthMeta(), ) if err != nil { s.logger.V(1).Error(err, "init Prometheus client http transport") return err } client, err := api.NewClient(api.Config{ - Address: s.metadata.prometheusAddress, + Address: s.metadata.PrometheusAddress, RoundTripper: roundTripper, }) if err != nil {
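The new `Validate` hook above front-loads the predictkube checks: address format, the three duration windows, and the API key. A small sketch of the duration side, substituting stdlib `time.ParseDuration` for the `str2duration` library the scaler actually uses (str2duration additionally accepts day and week units such as `2d`):

```go
package main

import (
	"fmt"
	"net/url"
	"time"
)

// validateWindows mimics the shape of predictKubeMetadata.Validate: check the
// address looks like a URL, then pre-parse the duration knobs once so the
// scaler can reuse them on every poll.
func validateWindows(address, horizon, step, window string) error {
	u, err := url.Parse(address)
	if err != nil || u.Scheme == "" || u.Host == "" {
		return fmt.Errorf("invalid prometheusAddress")
	}
	for name, val := range map[string]string{
		"predictHorizon":    horizon,
		"queryStep":         step,
		"historyTimeWindow": window,
	} {
		if _, err := time.ParseDuration(val); err != nil {
			return fmt.Errorf("%s parsing error %w", name, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateWindows("http://prometheus:9090", "30m", "1m", "12h")) // <nil>
	fmt.Println(validateWindows("http://prometheus:9090", "soon", "1m", "12h")) // parse error
}
```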
diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index 5a0516f42a0..521d5693442 100644 --- a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -25,18 +25,6 @@ import ( kedautil "github.com/kedacore/keda/v2/pkg/util" ) -const ( - promServerAddress = "serverAddress" - promQuery = "query" - promQueryParameters = "queryParameters" - promThreshold = "threshold" - promActivationThreshold = "activationThreshold" - promNamespace = "namespace" - promCustomHeaders = "customHeaders" - ignoreNullValues = "ignoreNullValues" - unsafeSsl = "unsafeSsl" -) - type prometheusScaler struct { metricType v2.MetricTargetType metadata *prometheusMetadata diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index 860228d3a99..f7ed365f52d 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -8,9 +8,7 @@ import ( "net/http" "net/url" "path" - "reflect" "regexp" - "strconv" "strings" "time" @@ -36,12 +34,14 @@ const ( rabbitModeTriggerConfigName = "mode" rabbitValueTriggerConfigName = "value" rabbitActivationValueTriggerConfigName = "activationValue" + rabbitModeUnknown = "Unknown" rabbitModeQueueLength = "QueueLength" rabbitModeMessageRate = "MessageRate" defaultRabbitMQQueueLength = 20 rabbitMetricType = "External" rabbitRootVhostPath = "/%2F" rmqTLSEnable = "enable" + rmqTLSDisable = "disable" ) const ( @@ -69,37 +69,155 @@ type rabbitMQScaler struct { } type rabbitMQMetadata struct { - queueName string - connectionName string // name used for the AMQP connection - mode string // QueueLength or MessageRate - value float64 // trigger value (queue length or publish/sec. rate) - activationValue float64 // activation value - host string // connection string for either HTTP or AMQP protocol - protocol string // either http or amqp protocol - vhostName string // override the vhost from the connection info - useRegex bool // specify if the queueName contains a rexeg - excludeUnacknowledged bool // specify if the QueueLength value should exclude Unacknowledged messages (Ready messages only) - pageSize int64 // specify the page size if useRegex is enabled - operation string // specify the operation to apply in case of multiples queues - timeout time.Duration // custom http timeout for a specific trigger - triggerIndex int // scaler index - - username string - password string + connectionName string // name used for the AMQP connection + triggerIndex int // scaler index + + QueueName string `keda:"name=queueName, order=triggerMetadata"` + // QueueLength or MessageRate + Mode string `keda:"name=mode, order=triggerMetadata, optional, default=Unknown"` + // deprecated; configure mode and value instead + QueueLength float64 `keda:"name=queueLength, order=triggerMetadata, optional"` + // trigger value (queue length or publish/sec.
rate) + Value float64 `keda:"name=value, order=triggerMetadata, optional"` + // activation value + ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, optional"` + // connection string for either HTTP or AMQP protocol + Host string `keda:"name=host, order=triggerMetadata;authParams;resolvedEnv"` + // either http or amqp protocol + Protocol string `keda:"name=protocol, order=triggerMetadata;authParams, optional, default=auto"` + // override the vhost from the connection info + VhostName string `keda:"name=vhostName, order=triggerMetadata, optional"` + // specify if the queueName contains a regex + UseRegex bool `keda:"name=useRegex, order=triggerMetadata, optional"` + // specify if the QueueLength value should exclude Unacknowledged messages (Ready messages only) + ExcludeUnacknowledged bool `keda:"name=excludeUnacknowledged, order=triggerMetadata, optional"` + // specify the page size if useRegex is enabled + PageSize int64 `keda:"name=pageSize, order=triggerMetadata, optional, default=100"` + // specify the operation to apply in case of multiples queues + Operation string `keda:"name=operation, order=triggerMetadata, optional, default=sum"` + // custom http timeout for a specific trigger + TimeoutMs int `keda:"name=timeout, order=triggerMetadata, optional"` + + Username string `keda:"name=username, order=authParams;resolvedEnv, optional"` + Password string `keda:"name=password, order=authParams;resolvedEnv, optional"` // TLS - ca string - cert string - key string - keyPassword string - enableTLS bool - unsafeSsl bool + Ca string `keda:"name=ca, order=authParams, optional"` + Cert string `keda:"name=cert, order=authParams, optional"` + Key string `keda:"name=key, order=authParams, optional"` + KeyPassword string `keda:"name=keyPassword, order=authParams, optional"` + EnableTLS string `keda:"name=tls, order=authParams, optional, default=disable"` + UnsafeSsl bool `keda:"name=unsafeSsl, order=triggerMetadata, optional"` // token provider for azure AD + WorkloadIdentityResource string `keda:"name=workloadIdentityResource, order=authParams, optional"` workloadIdentityClientID string workloadIdentityTenantID string workloadIdentityAuthorityHost string - workloadIdentityResource string +} + +func (r *rabbitMQMetadata) Validate() error { + if r.Protocol != amqpProtocol && r.Protocol != httpProtocol && r.Protocol != autoProtocol { + return fmt.Errorf("the protocol has to be either `%s`, `%s`, or `%s` but is `%s`", + amqpProtocol, httpProtocol, autoProtocol, r.Protocol) + } + + if r.EnableTLS != rmqTLSEnable && r.EnableTLS != rmqTLSDisable { + return fmt.Errorf("err incorrect value for TLS given: %s", r.EnableTLS) + } + + certGiven := r.Cert != "" + keyGiven := r.Key != "" + if certGiven != keyGiven { + return fmt.Errorf("both key and cert must be provided") + } + + if r.PageSize < 1 { + return fmt.Errorf("pageSize should be 1 or greater") + } + + if (r.Username != "" || r.Password != "") && (r.Username == "" || r.Password == "") { + return fmt.Errorf("username and password must be given together") + }
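`Validate` continues below with the `auto` protocol resolution. The same scheme sniffing as a standalone sketch, with illustrative hosts:

```go
package main

import (
	"fmt"
	"net/url"
)

// resolveProtocol mirrors the scheme check in rabbitMQMetadata.Validate:
// when the protocol is "auto", infer amqp vs. http from the host URL.
func resolveProtocol(host string) (string, error) {
	u, err := url.Parse(host)
	if err != nil {
		return "", fmt.Errorf("can't parse host to find protocol: %w", err)
	}
	switch u.Scheme {
	case "amqp", "amqps":
		return "amqp", nil
	case "http", "https":
		return "http", nil
	default:
		return "", fmt.Errorf("unknown host URL scheme `%s`", u.Scheme)
	}
}

func main() {
	for _, h := range []string{
		"amqps://user:pass@rabbit.example.com/vhost",
		"https://rabbit.example.com",
	} {
		p, err := resolveProtocol(h)
		fmt.Println(h, "->", p, err)
	}
}
```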
+ + // If the protocol is auto, check the host scheme. + if r.Protocol == autoProtocol { + parsedURL, err := url.Parse(r.Host) + if err != nil { + return fmt.Errorf("can't parse host to find protocol: %w", err) + } + switch parsedURL.Scheme { + case "amqp", "amqps": + r.Protocol = amqpProtocol + case "http", "https": + r.Protocol = httpProtocol + default: + return fmt.Errorf("unknown host URL scheme `%s`", parsedURL.Scheme) + } + } + + if r.Protocol == amqpProtocol && r.WorkloadIdentityResource != "" { + return fmt.Errorf("workload identity is not supported for amqp protocol currently") + } + + if r.UseRegex && r.Protocol != httpProtocol { + return fmt.Errorf("configure only useRegex with http protocol") + } + + if r.ExcludeUnacknowledged && r.Protocol != httpProtocol { + return fmt.Errorf("configure excludeUnacknowledged=true with http protocol only") + } + + if err := r.validateTrigger(); err != nil { + return err + } + + return nil +} + +func (r *rabbitMQMetadata) validateTrigger() error { + // If nothing is specified for the trigger then return the default + if r.QueueLength == 0 && r.Mode == rabbitModeUnknown && r.Value == 0 { + r.Mode = rabbitModeQueueLength + r.Value = defaultRabbitMQQueueLength + return nil + } + + if r.QueueLength != 0 && (r.Mode != rabbitModeUnknown || r.Value != 0) { + return fmt.Errorf("queueLength is deprecated; configure only %s and %s", rabbitModeTriggerConfigName, rabbitValueTriggerConfigName) + } + + if r.QueueLength != 0 { + r.Mode = rabbitModeQueueLength + r.Value = r.QueueLength + + return nil + } + + if r.Mode == rabbitModeUnknown { + return fmt.Errorf("%s must be specified", rabbitModeTriggerConfigName) + } + + if r.Value == 0 { + return fmt.Errorf("%s must be specified", rabbitValueTriggerConfigName) + } + + if r.Mode != rabbitModeQueueLength && r.Mode != rabbitModeMessageRate { + return fmt.Errorf("trigger mode %s must be one of %s, %s", r.Mode, rabbitModeQueueLength, rabbitModeMessageRate) + } + + if r.Mode == rabbitModeMessageRate && r.Protocol != httpProtocol { + return fmt.Errorf("protocol %s not supported; must be http to use mode %s", r.Protocol, rabbitModeMessageRate) + } + + if r.Protocol == amqpProtocol && r.TimeoutMs != 0 { + return fmt.Errorf("amqp protocol doesn't support custom timeouts: %d", r.TimeoutMs) + } + + if r.TimeoutMs < 0 { + return fmt.Errorf("timeout must not be negative: %d", r.TimeoutMs) + } + return nil } type queueInfo struct { @@ -139,32 +257,40 @@ func NewRabbitMQScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { if err != nil { return nil, fmt.Errorf("error parsing rabbitmq metadata: %w", err) } + s.metadata = meta - s.httpClient = kedautil.CreateHTTPClient(meta.timeout, meta.unsafeSsl) - if meta.enableTLS { - tlsConfig, tlsErr := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + var timeout time.Duration + if s.metadata.TimeoutMs != 0 { + timeout = time.Duration(s.metadata.TimeoutMs) * time.Millisecond + } else { + timeout = config.GlobalHTTPTimeout + } + + s.httpClient = kedautil.CreateHTTPClient(timeout, meta.UnsafeSsl) + if meta.EnableTLS == rmqTLSEnable { + tlsConfig, tlsErr := kedautil.NewTLSConfigWithPassword(meta.Cert, meta.Key, meta.KeyPassword, meta.Ca, meta.UnsafeSsl) if tlsErr != nil { return nil, tlsErr } s.httpClient.Transport = kedautil.CreateHTTPTransportWithTLSConfig(tlsConfig) } - if meta.protocol == amqpProtocol { + if meta.Protocol == amqpProtocol { // Override vhost if requested.
- host := meta.host - if meta.vhostName != "" || (meta.username != "" && meta.password != "") { + host := meta.Host + if meta.VhostName != "" || (meta.Username != "" && meta.Password != "") { hostURI, err := amqp.ParseURI(host) if err != nil { return nil, fmt.Errorf("error parsing rabbitmq connection string: %w", err) } - if meta.vhostName != "" { - hostURI.Vhost = meta.vhostName + if meta.VhostName != "" { + hostURI.Vhost = meta.VhostName } - if meta.username != "" && meta.password != "" { - hostURI.Username = meta.username - hostURI.Password = meta.password + if meta.Username != "" && meta.Password != "" { + hostURI.Username = meta.Username + hostURI.Password = meta.Password } host = hostURI.String() @@ -181,308 +307,24 @@ func NewRabbitMQScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return s, nil } -func resolveProtocol(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - meta.protocol = defaultProtocol - if val, ok := config.AuthParams["protocol"]; ok { - meta.protocol = val - } - if val, ok := config.TriggerMetadata["protocol"]; ok { - meta.protocol = val - } - if meta.protocol != amqpProtocol && meta.protocol != httpProtocol && meta.protocol != autoProtocol { - return fmt.Errorf("the protocol has to be either `%s`, `%s`, or `%s` but is `%s`", amqpProtocol, httpProtocol, autoProtocol, meta.protocol) - } - return nil -} - -func resolveHostValue(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - switch { - case config.AuthParams["host"] != "": - meta.host = config.AuthParams["host"] - case config.TriggerMetadata["host"] != "": - meta.host = config.TriggerMetadata["host"] - case config.TriggerMetadata["hostFromEnv"] != "": - meta.host = config.ResolvedEnv[config.TriggerMetadata["hostFromEnv"]] - default: - return fmt.Errorf("no host setting given") - } - return nil -} - -func resolveTimeout(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - if val, ok := config.TriggerMetadata["timeout"]; ok { - timeoutMS, err := strconv.Atoi(val) - if err != nil { - return fmt.Errorf("unable to parse timeout: %w", err) - } - if meta.protocol == amqpProtocol { - return fmt.Errorf("amqp protocol doesn't support custom timeouts: %w", err) - } - if timeoutMS <= 0 { - return fmt.Errorf("timeout must be greater than 0: %w", err) - } - meta.timeout = time.Duration(timeoutMS) * time.Millisecond - } else { - meta.timeout = config.GlobalHTTPTimeout - } - return nil -} - -func resolveTLSAuthParams(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - meta.enableTLS = false - if val, ok := config.AuthParams["tls"]; ok { - val = strings.TrimSpace(val) - if val == rmqTLSEnable { - meta.ca = config.AuthParams["ca"] - meta.cert = config.AuthParams["cert"] - meta.key = config.AuthParams["key"] - meta.enableTLS = true - } else if val != "disable" { - return fmt.Errorf("err incorrect value for TLS given: %s", val) - } - } - return nil -} - -func resolveAuth(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - usernameVal, err := getParameterFromConfigV2(config, "username", reflect.TypeOf(meta.username), - UseAuthentication(true), UseResolvedEnv(true), IsOptional(true)) - if err != nil { - return err - } - meta.username = usernameVal.(string) - - passwordVal, err := getParameterFromConfigV2(config, "password", reflect.TypeOf(meta.username), - UseAuthentication(true), UseResolvedEnv(true), IsOptional(true)) - if err != nil { - return err - } - meta.password = passwordVal.(string) - - if (meta.username != "" || 
meta.password != "") && (meta.username == "" || meta.password == "") { - return fmt.Errorf("username and password must be given together") - } - - return nil -} - func parseRabbitMQMetadata(config *scalersconfig.ScalerConfig) (*rabbitMQMetadata, error) { - meta := rabbitMQMetadata{ + meta := &rabbitMQMetadata{ connectionName: connectionName(config), } - // Resolve protocol type - if err := resolveProtocol(config, &meta); err != nil { - return nil, err - } - - // Resolve host value - if err := resolveHostValue(config, &meta); err != nil { - return nil, err - } - - // Resolve TLS authentication parameters - if err := resolveTLSAuthParams(config, &meta); err != nil { - return nil, err - } - - // Resolve username and password - if err := resolveAuth(config, &meta); err != nil { - return nil, err + if err := config.TypedConfig(meta); err != nil { + return nil, fmt.Errorf("error parsing rabbitmq metadata: %w", err) } - meta.keyPassword = config.AuthParams["keyPassword"] - if config.PodIdentity.Provider == v1alpha1.PodIdentityProviderAzureWorkload { - if config.AuthParams["workloadIdentityResource"] != "" { + if meta.WorkloadIdentityResource != "" { meta.workloadIdentityClientID = config.PodIdentity.GetIdentityID() meta.workloadIdentityTenantID = config.PodIdentity.GetIdentityTenantID() - meta.workloadIdentityResource = config.AuthParams["workloadIdentityResource"] - } - } - - certGiven := meta.cert != "" - keyGiven := meta.key != "" - if certGiven != keyGiven { - return nil, fmt.Errorf("both key and cert must be provided") - } - - meta.unsafeSsl = false - if val, ok := config.TriggerMetadata["unsafeSsl"]; ok { - boolVal, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("failed to parse unsafeSsl value. Must be either true or false") - } - meta.unsafeSsl = boolVal - } - - // If the protocol is auto, check the host scheme. 
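For the AMQP-path override in `NewRabbitMQScaler` above, where trigger-level vhost and credentials win over whatever the connection string carries, here is a `net/url` based approximation of the effect; the scaler itself goes through `amqp.ParseURI`, so treat this only as a sketch with made-up values:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// overrideAMQP applies trigger-level vhost and credentials on top of an
// existing AMQP connection string.
func overrideAMQP(host, vhost, user, pass string) (string, error) {
	u, err := url.Parse(host)
	if err != nil {
		return "", err
	}
	if vhost != "" {
		u.Path = "/" + strings.TrimPrefix(vhost, "/")
	}
	if user != "" && pass != "" {
		u.User = url.UserPassword(user, pass)
	}
	return u.String(), nil
}

func main() {
	out, err := overrideAMQP("amqps://guest:guest@rabbit.example.com/%2F", "orders", "scaler", "secret")
	fmt.Println(out, err)
	// amqps://scaler:secret@rabbit.example.com/orders <nil>
}
```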
- if meta.protocol == autoProtocol { - parsedURL, err := url.Parse(meta.host) - if err != nil { - return nil, fmt.Errorf("can't parse host to find protocol: %w", err) - } - switch parsedURL.Scheme { - case "amqp", "amqps": - meta.protocol = amqpProtocol - case "http", "https": - meta.protocol = httpProtocol - default: - return nil, fmt.Errorf("unknown host URL scheme `%s`", parsedURL.Scheme) } } - if meta.protocol == amqpProtocol && config.AuthParams["workloadIdentityResource"] != "" { - return nil, fmt.Errorf("workload identity is not supported for amqp protocol currently") - } - - // Resolve queueName - if val, ok := config.TriggerMetadata["queueName"]; ok { - meta.queueName = val - } else { - return nil, fmt.Errorf("no queue name given") - } - - // Resolve vhostName - if val, ok := config.TriggerMetadata["vhostName"]; ok { - meta.vhostName = val - } - - err := parseRabbitMQHttpProtocolMetadata(config, &meta) - if err != nil { - return nil, err - } - - if meta.useRegex && meta.protocol != httpProtocol { - return nil, fmt.Errorf("configure only useRegex with http protocol") - } - - if meta.excludeUnacknowledged && meta.protocol != httpProtocol { - return nil, fmt.Errorf("configure excludeUnacknowledged=true with http protocol only") - } - - _, err = parseTrigger(&meta, config) - if err != nil { - return nil, fmt.Errorf("unable to parse trigger: %w", err) - } - // Resolve timeout - if err := resolveTimeout(config, &meta); err != nil { - return nil, err - } meta.triggerIndex = config.TriggerIndex - return &meta, nil -} - -func parseRabbitMQHttpProtocolMetadata(config *scalersconfig.ScalerConfig, meta *rabbitMQMetadata) error { - // Resolve useRegex - if val, ok := config.TriggerMetadata["useRegex"]; ok { - useRegex, err := strconv.ParseBool(val) - if err != nil { - return fmt.Errorf("useRegex has invalid value") - } - meta.useRegex = useRegex - } - - // Resolve excludeUnacknowledged - if val, ok := config.TriggerMetadata["excludeUnacknowledged"]; ok { - excludeUnacknowledged, err := strconv.ParseBool(val) - if err != nil { - return fmt.Errorf("excludeUnacknowledged has invalid value") - } - meta.excludeUnacknowledged = excludeUnacknowledged - } - - // Resolve pageSize - if val, ok := config.TriggerMetadata["pageSize"]; ok { - pageSize, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return fmt.Errorf("pageSize has invalid value") - } - meta.pageSize = pageSize - if meta.pageSize < 1 { - return fmt.Errorf("pageSize should be 1 or greater than 1") - } - } else { - meta.pageSize = 100 - } - - // Resolve operation - meta.operation = defaultOperation - if val, ok := config.TriggerMetadata["operation"]; ok { - meta.operation = val - } - - return nil -} - -func parseTrigger(meta *rabbitMQMetadata, config *scalersconfig.ScalerConfig) (*rabbitMQMetadata, error) { - deprecatedQueueLengthValue, deprecatedQueueLengthPresent := config.TriggerMetadata[rabbitQueueLengthMetricName] - mode, modePresent := config.TriggerMetadata[rabbitModeTriggerConfigName] - value, valuePresent := config.TriggerMetadata[rabbitValueTriggerConfigName] - activationValue, activationValuePresent := config.TriggerMetadata[rabbitActivationValueTriggerConfigName] - - // Initialize to default trigger settings - meta.mode = rabbitModeQueueLength - meta.value = defaultRabbitMQQueueLength - - // If nothing is specified for the trigger then return the default - if !deprecatedQueueLengthPresent && !modePresent && !valuePresent { - return meta, nil - } - - // Only allow one of `queueLength` or `mode`/`value` - if 
deprecatedQueueLengthPresent && (modePresent || valuePresent) { - return nil, fmt.Errorf("queueLength is deprecated; configure only %s and %s", rabbitModeTriggerConfigName, rabbitValueTriggerConfigName) - } - - // Parse activation value - if activationValuePresent { - activation, err := strconv.ParseFloat(activationValue, 64) - if err != nil { - return nil, fmt.Errorf("can't parse %s: %w", rabbitActivationValueTriggerConfigName, err) - } - meta.activationValue = activation - } - - // Parse deprecated `queueLength` value - if deprecatedQueueLengthPresent { - queueLength, err := strconv.ParseFloat(deprecatedQueueLengthValue, 64) - if err != nil { - return nil, fmt.Errorf("can't parse %s: %w", rabbitQueueLengthMetricName, err) - } - meta.mode = rabbitModeQueueLength - meta.value = queueLength - - return meta, nil - } - - if !modePresent { - return nil, fmt.Errorf("%s must be specified", rabbitModeTriggerConfigName) - } - if !valuePresent { - return nil, fmt.Errorf("%s must be specified", rabbitValueTriggerConfigName) - } - - // Resolve trigger mode - switch mode { - case rabbitModeQueueLength: - meta.mode = rabbitModeQueueLength - case rabbitModeMessageRate: - meta.mode = rabbitModeMessageRate - default: - return nil, fmt.Errorf("trigger mode %s must be one of %s, %s", mode, rabbitModeQueueLength, rabbitModeMessageRate) - } - triggerValue, err := strconv.ParseFloat(value, 64) - if err != nil { - return nil, fmt.Errorf("can't parse %s: %w", rabbitValueTriggerConfigName, err) - } - meta.value = triggerValue - - if meta.mode == rabbitModeMessageRate && meta.protocol != httpProtocol { - return nil, fmt.Errorf("protocol %s not supported; must be http to use mode %s", meta.protocol, rabbitModeMessageRate) - } - return meta, nil } @@ -496,8 +338,8 @@ func getConnectionAndChannel(host string, meta *rabbitMQMetadata) (*amqp.Connect }, } - if meta.enableTLS { - tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + if meta.EnableTLS == rmqTLSEnable { + tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.Cert, meta.Key, meta.KeyPassword, meta.Ca, meta.UnsafeSsl) if err != nil { return nil, nil, err } @@ -534,13 +376,13 @@ func (s *rabbitMQScaler) Close(context.Context) error { } func (s *rabbitMQScaler) getQueueStatus(ctx context.Context) (int64, float64, error) { - if s.metadata.protocol == httpProtocol { + if s.metadata.Protocol == httpProtocol { info, err := s.getQueueInfoViaHTTP(ctx) if err != nil { return -1, -1, err } - if s.metadata.excludeUnacknowledged { + if s.metadata.ExcludeUnacknowledged { // messages count includes only ready return int64(info.MessagesReady), info.MessageStat.PublishDetail.Rate, nil } @@ -549,7 +391,7 @@ func (s *rabbitMQScaler) getQueueStatus(ctx context.Context) (int64, float64, er } // QueueDeclarePassive assumes that the queue exists and fails if it doesn't - items, err := s.channel.QueueDeclarePassive(s.metadata.queueName, false, false, false, false, amqp.Table{}) + items, err := s.channel.QueueDeclarePassive(s.metadata.QueueName, false, false, false, false, amqp.Table{}) if err != nil { return -1, -1, err } @@ -565,9 +407,9 @@ func getJSON(ctx context.Context, s *rabbitMQScaler, url string) (queueInfo, err return result, err } - if s.metadata.workloadIdentityResource != "" { + if s.metadata.WorkloadIdentityResource != "" { if s.azureOAuth == nil { - s.azureOAuth = azure.NewAzureADWorkloadIdentityTokenProvider(ctx, s.metadata.workloadIdentityClientID, s.metadata.workloadIdentityTenantID, 
s.metadata.workloadIdentityAuthorityHost, s.metadata.workloadIdentityResource) + s.azureOAuth = azure.NewAzureADWorkloadIdentityTokenProvider(ctx, s.metadata.workloadIdentityClientID, s.metadata.workloadIdentityTenantID, s.metadata.workloadIdentityAuthorityHost, s.metadata.WorkloadIdentityResource) } err = s.azureOAuth.Refresh() @@ -586,7 +428,7 @@ func getJSON(ctx context.Context, s *rabbitMQScaler, url string) (queueInfo, err defer r.Body.Close() if r.StatusCode == 200 { - if s.metadata.useRegex { + if s.metadata.UseRegex { var queues regexQueueInfo err = json.NewDecoder(r.Body).Decode(&queues) if err != nil { @@ -626,24 +468,24 @@ func getVhostAndPathFromURL(rawPath, vhostName string) (resolvedVhostPath, resol } func (s *rabbitMQScaler) getQueueInfoViaHTTP(ctx context.Context) (*queueInfo, error) { - parsedURL, err := url.Parse(s.metadata.host) + parsedURL, err := url.Parse(s.metadata.Host) if err != nil { return nil, err } - vhost, subpaths := getVhostAndPathFromURL(parsedURL.Path, s.metadata.vhostName) + vhost, subpaths := getVhostAndPathFromURL(parsedURL.Path, s.metadata.VhostName) parsedURL.Path = subpaths - if s.metadata.username != "" && s.metadata.password != "" { - parsedURL.User = url.UserPassword(s.metadata.username, s.metadata.password) + if s.metadata.Username != "" && s.metadata.Password != "" { + parsedURL.User = url.UserPassword(s.metadata.Username, s.metadata.Password) } var getQueueInfoManagementURI string - if s.metadata.useRegex { - getQueueInfoManagementURI = fmt.Sprintf("%s/api/queues%s?page=1&use_regex=true&pagination=false&name=%s&page_size=%d", parsedURL.String(), vhost, url.QueryEscape(s.metadata.queueName), s.metadata.pageSize) + if s.metadata.UseRegex { + getQueueInfoManagementURI = fmt.Sprintf("%s/api/queues%s?page=1&use_regex=true&pagination=false&name=%s&page_size=%d", parsedURL.String(), vhost, url.QueryEscape(s.metadata.QueueName), s.metadata.PageSize) } else { - getQueueInfoManagementURI = fmt.Sprintf("%s/api/queues%s/%s", parsedURL.String(), vhost, url.QueryEscape(s.metadata.queueName)) + getQueueInfoManagementURI = fmt.Sprintf("%s/api/queues%s/%s", parsedURL.String(), vhost, url.QueryEscape(s.metadata.QueueName)) } var info queueInfo @@ -660,9 +502,9 @@ func (s *rabbitMQScaler) getQueueInfoViaHTTP(ctx context.Context) (*queueInfo, e func (s *rabbitMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(s.metadata.queueName)))), + Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(s.metadata.QueueName)))), }, - Target: GetMetricTargetMili(s.metricType, s.metadata.value), + Target: GetMetricTargetMili(s.metricType, s.metadata.Value), } metricSpec := v2.MetricSpec{ External: externalMetric, Type: rabbitMetricType, @@ -680,12 +522,12 @@ func (s *rabbitMQScaler) GetMetricsAndActivity(ctx context.Context, metricName s var metric external_metrics.ExternalMetricValue var isActive bool - if s.metadata.mode == rabbitModeQueueLength { + if s.metadata.Mode == rabbitModeQueueLength { metric = GenerateMetricInMili(metricName, float64(messages)) - isActive = float64(messages) > s.metadata.activationValue + isActive = float64(messages) > s.metadata.ActivationValue } else { metric = GenerateMetricInMili(metricName, publishRate) - isActive = publishRate > s.metadata.activationValue || 
float64(messages) > s.metadata.activationValue + isActive = publishRate > s.metadata.ActivationValue || float64(messages) > s.metadata.ActivationValue } return []external_metrics.ExternalMetricValue{metric}, isActive, nil @@ -696,7 +538,7 @@ func getComposedQueue(s *rabbitMQScaler, q []queueInfo) (queueInfo, error) { queue.Name = "composed-queue" queue.MessagesUnacknowledged = 0 if len(q) > 0 { - switch s.metadata.operation { + switch s.metadata.Operation { case sumOperation: sumMessages, sumReady, sumRate := getSum(q) queue.Messages = sumMessages @@ -713,7 +555,7 @@ func getComposedQueue(s *rabbitMQScaler, q []queueInfo) (queueInfo, error) { queue.MessagesReady = maxReady queue.MessageStat.PublishDetail.Rate = maxRate default: - return queue, fmt.Errorf("operation mode %s must be one of %s, %s, %s", s.metadata.operation, sumOperation, avgOperation, maxOperation) + return queue, fmt.Errorf("operation mode %s must be one of %s, %s, %s", s.metadata.Operation, sumOperation, avgOperation, maxOperation) } } else { queue.Messages = 0 diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go index dd9c3f900b8..ed1785e5be3 100644 --- a/pkg/scalers/rabbitmq_scaler_test.go +++ b/pkg/scalers/rabbitmq_scaler_test.go @@ -34,7 +34,7 @@ type parseRabbitMQAuthParamTestData struct { podIdentity v1alpha1.AuthPodIdentity authParams map[string]string isError bool - enableTLS bool + enableTLS string workloadIdentity bool } @@ -142,35 +142,35 @@ var testRabbitMQMetadata = []parseRabbitMQMetadataTestData{ } var testRabbitMQAuthParamData = []parseRabbitMQAuthParamTestData{ - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, rmqTLSEnable, false}, // success, TLS cert/key and assumed public CA - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "cert": "ceert", "key": "keey"}, false, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "cert": "ceert", "key": "keey"}, false, rmqTLSEnable, false}, // success, TLS cert/key + key password and assumed public CA - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "cert": "ceert", "key": "keey", "keyPassword": "keeyPassword"}, false, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "cert": "ceert", "key": "keey", "keyPassword": "keeyPassword"}, false, rmqTLSEnable, false}, // success, TLS CA only - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa"}, false, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa"}, false, rmqTLSEnable, false}, // failure, TLS missing cert - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "key": "kee"}, true, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": 
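The deletions above strip the RabbitMQ scaler's hand-rolled metadata parsing, and the call sites now read exported fields (Host, QueueName, Value, UnsafeSsl, ...). That points at KEDA's declarative `keda:` struct-tag parsing, the same pattern the Selenium Grid scaler adopts later in this diff. The replacement struct itself sits outside these hunks; a minimal sketch of the shape it plausibly takes, with tag order strings assumed, field names taken from the renamed usages, and only the pageSize default of 100 carried over from the removed code:

type rabbitMQMetadata struct {
	// Connection and queue settings (field names from the renamed call sites above).
	Host      string `keda:"name=host, order=triggerMetadata;authParams;resolvedEnv"`
	QueueName string `keda:"name=queueName, order=triggerMetadata"`
	VhostName string `keda:"name=vhostName, order=triggerMetadata, optional"`

	// Trigger settings formerly resolved by parseTrigger.
	Mode            string  `keda:"name=mode, order=triggerMetadata, optional"`
	Value           float64 `keda:"name=value, order=triggerMetadata, optional"`
	ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, optional"`

	// HTTP-only settings formerly resolved by parseRabbitMQHttpProtocolMetadata.
	UseRegex              bool   `keda:"name=useRegex, order=triggerMetadata, optional"`
	ExcludeUnacknowledged bool   `keda:"name=excludeUnacknowledged, order=triggerMetadata, optional"`
	PageSize              int64  `keda:"name=pageSize, order=triggerMetadata, optional, default=100"`
	Operation             string `keda:"name=operation, order=triggerMetadata, optional"`

	// ... protocol, TLS and auth fields (Protocol, EnableTLS, Cert, Key, KeyPassword, Ca, Username, Password, ...) elided from this sketch

	triggerIndex int // set from config.TriggerIndex, not user-facing metadata
}

The cross-field rules the old code enforced inline (useRegex and excludeUnacknowledged only with the http protocol, no workload identity over amqp, queueLength being mutually exclusive with mode/value) would presumably move to a separate validation step after tag parsing.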
host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "key": "kee"}, true, rmqTLSEnable, false}, // failure, TLS missing key - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert"}, true, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert"}, true, rmqTLSEnable, false}, // failure, TLS invalid - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "yes", "ca": "caaa", "cert": "ceert", "key": "kee"}, true, true, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "yes", "ca": "caaa", "cert": "ceert", "key": "kee"}, true, rmqTLSEnable, false}, // success, username and password - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"username": "user", "password": "PASSWORD"}, false, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"username": "user", "password": "PASSWORD"}, false, rmqTLSDisable, false}, // failure, username but no password - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"username": "user"}, true, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"username": "user"}, true, rmqTLSDisable, false}, // failure, password but no username - {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"password": "PASSWORD"}, true, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"password": "PASSWORD"}, true, rmqTLSDisable, false}, // success, username and password from env - {map[string]string{"queueName": "sample", "hostFromEnv": host, "usernameFromEnv": rabbitMQUsername, "passwordFromEnv": rabbitMQPassword}, v1alpha1.AuthPodIdentity{}, map[string]string{}, false, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host, "usernameFromEnv": rabbitMQUsername, "passwordFromEnv": rabbitMQPassword}, v1alpha1.AuthPodIdentity{}, map[string]string{}, false, rmqTLSDisable, false}, // failure, username from env but not password - {map[string]string{"queueName": "sample", "hostFromEnv": host, "usernameFromEnv": rabbitMQUsername}, v1alpha1.AuthPodIdentity{}, map[string]string{}, true, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host, "usernameFromEnv": rabbitMQUsername}, v1alpha1.AuthPodIdentity{}, map[string]string{}, true, rmqTLSDisable, false}, // failure, password from env but not username - {map[string]string{"queueName": "sample", "hostFromEnv": host, "passwordFromEnv": rabbitMQPassword}, v1alpha1.AuthPodIdentity{}, map[string]string{}, true, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host, "passwordFromEnv": rabbitMQPassword}, v1alpha1.AuthPodIdentity{}, map[string]string{}, true, rmqTLSDisable, false}, // success, WorkloadIdentity - {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "http"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: 
kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, false, false, true}, + {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "http"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, false, rmqTLSDisable, true}, // failure, WoekloadIdentity not supported for amqp - {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "amqp"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, true, false, false}, + {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "amqp"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, true, rmqTLSDisable, false}, } var rabbitMQMetricIdentifiers = []rabbitMQMetricIdentifier{ {&testRabbitMQMetadata[1], 0, "s0-rabbitmq-sample"}, @@ -191,8 +191,8 @@ func TestRabbitMQParseMetadata(t *testing.T) { if err != nil && !testData.isError { t.Errorf("Expect error but got success in test case %d", idx) } - if boolVal != meta.unsafeSsl { - t.Errorf("Expect %t but got %t in test case %d", boolVal, meta.unsafeSsl, idx) + if boolVal != meta.UnsafeSsl { + t.Errorf("Expect %t but got %t in test case %d", boolVal, meta.UnsafeSsl, idx) } } } @@ -207,25 +207,25 @@ func TestRabbitMQParseAuthParamData(t *testing.T) { if testData.isError && err == nil { t.Error("Expected error but got success") } - if metadata != nil && metadata.enableTLS != testData.enableTLS { - t.Errorf("Expected enableTLS to be set to %v but got %v\n", testData.enableTLS, metadata.enableTLS) + if metadata != nil && metadata.EnableTLS != testData.enableTLS { + t.Errorf("Expected enableTLS to be set to %v but got %v\n", testData.enableTLS, metadata.EnableTLS) } - if metadata != nil && metadata.enableTLS { - if metadata.ca != testData.authParams["ca"] { - t.Errorf("Expected ca to be set to %v but got %v\n", testData.authParams["ca"], metadata.enableTLS) + if metadata != nil && metadata.EnableTLS == rmqTLSEnable { + if metadata.Ca != testData.authParams["ca"] { + t.Errorf("Expected ca to be set to %v but got %v\n", testData.authParams["ca"], metadata.EnableTLS) } - if metadata.cert != testData.authParams["cert"] { - t.Errorf("Expected cert to be set to %v but got %v\n", testData.authParams["cert"], metadata.cert) + if metadata.Cert != testData.authParams["cert"] { + t.Errorf("Expected cert to be set to %v but got %v\n", testData.authParams["cert"], metadata.Cert) } - if metadata.key != testData.authParams["key"] { - t.Errorf("Expected key to be set to %v but got %v\n", testData.authParams["key"], metadata.key) + if metadata.Key != testData.authParams["key"] { + t.Errorf("Expected key to be set to %v but got %v\n", testData.authParams["key"], metadata.Key) } - if metadata.keyPassword != testData.authParams["keyPassword"] { - t.Errorf("Expected key to be set to %v but got %v\n", testData.authParams["keyPassword"], metadata.key) + if metadata.KeyPassword != testData.authParams["keyPassword"] { + t.Errorf("Expected key to be set to %v but got %v\n", testData.authParams["keyPassword"], metadata.Key) } } if metadata != nil && metadata.workloadIdentityClientID != "" && 
!testData.workloadIdentity { - t.Errorf("Expected workloadIdentity to be disabled but got %v as client ID and %v as resource\n", metadata.workloadIdentityClientID, metadata.workloadIdentityResource) + t.Errorf("Expected workloadIdentity to be disabled but got %v as client ID and %v as resource\n", metadata.workloadIdentityClientID, metadata.WorkloadIdentityResource) } if metadata != nil && metadata.workloadIdentityClientID == "" && testData.workloadIdentity { t.Error("Expected workloadIdentity to be enabled but was not\n") @@ -248,8 +248,8 @@ func TestParseDefaultQueueLength(t *testing.T) { t.Error("Expected success but got error", err) case testData.isError && err == nil: t.Error("Expected error but got success") - case metadata.value != defaultRabbitMQQueueLength: - t.Error("Expected default queueLength =", defaultRabbitMQQueueLength, "but got", metadata.value) + case metadata.Value != defaultRabbitMQQueueLength: + t.Error("Expected default queueLength =", defaultRabbitMQQueueLength, "but got", metadata.Value) } } } diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go index 3cba72fbc9e..057181c87e7 100644 --- a/pkg/scalers/selenium_grid_scaler.go +++ b/pkg/scalers/selenium_grid_scaler.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "math" "net/http" "strings" @@ -29,72 +28,86 @@ type seleniumGridScaler struct { type seleniumGridScalerMetadata struct { triggerIndex int - URL string `keda:"name=url, order=triggerMetadata;authParams"` - BrowserName string `keda:"name=browserName, order=triggerMetadata"` - SessionBrowserName string `keda:"name=sessionBrowserName, order=triggerMetadata, optional"` - ActivationThreshold int64 `keda:"name=activationThreshold, order=triggerMetadata, optional"` - BrowserVersion string `keda:"name=browserVersion, order=triggerMetadata, optional, default=latest"` - UnsafeSsl bool `keda:"name=unsafeSsl, order=triggerMetadata, optional, default=false"` - PlatformName string `keda:"name=platformName, order=triggerMetadata, optional, default=linux"` - SessionsPerNode int64 `keda:"name=sessionsPerNode, order=triggerMetadata, optional, default=1"` - SetSessionsFromHub bool `keda:"name=setSessionsFromHub, order=triggerMetadata, optional, default=false"` - SessionBrowserVersion string `keda:"name=sessionBrowserVersion, order=triggerMetadata, optional"` - - // auth - Username string `keda:"name=username, order=authParams;resolvedEnv;triggerMetadata, optional"` - Password string `keda:"name=password, order=authParams;resolvedEnv;triggerMetadata, optional"` + URL string `keda:"name=url, order=authParams;triggerMetadata"` + AuthType string `keda:"name=authType, order=authParams;resolvedEnv, optional"` + Username string `keda:"name=username, order=authParams;resolvedEnv, optional"` + Password string `keda:"name=password, order=authParams;resolvedEnv, optional"` + AccessToken string `keda:"name=accessToken, order=authParams;resolvedEnv, optional"` + BrowserName string `keda:"name=browserName, order=triggerMetadata"` + SessionBrowserName string `keda:"name=sessionBrowserName, order=triggerMetadata, optional"` + ActivationThreshold int64 `keda:"name=activationThreshold, order=triggerMetadata, optional"` + BrowserVersion string `keda:"name=browserVersion, order=triggerMetadata, optional, default=latest"` + UnsafeSsl bool `keda:"name=unsafeSsl, order=triggerMetadata, optional, default=false"` + PlatformName string `keda:"name=platformName, order=triggerMetadata, optional, default=linux"` + NodeMaxSessions int `keda:"name=nodeMaxSessions, 
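The test updates above replace the boolean enableTLS with a string compared against rmqTLSEnable and rmqTLSDisable. The constant declarations sit outside this diff; given the "tls": "enable" auth params in the fixtures, a minimal sketch of the assumed values:

const (
	rmqTLSEnable  = "enable"  // assumed: authParams "tls": "enable" switches client TLS on
	rmqTLSDisable = "disable" // assumed: the default when "tls" is unset
)

Moving from a bool to a string leaves room for more than two TLS states later, which is presumably why the fixtures now carry the constant rather than true/false.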
order=triggerMetadata, optional, default=1"` TargetValue int64 } -type seleniumResponse struct { - Data data `json:"data"` +type SeleniumResponse struct { + Data Data `json:"data"` } -type data struct { - Grid grid `json:"grid"` - NodesInfo nodesInfo `json:"nodesInfo"` - SessionsInfo sessionsInfo `json:"sessionsInfo"` +type Data struct { + Grid Grid `json:"grid"` + NodesInfo NodesInfo `json:"nodesInfo"` + SessionsInfo SessionsInfo `json:"sessionsInfo"` } -type grid struct { - MaxSession int `json:"maxSession"` - NodeCount int `json:"nodeCount"` +type Grid struct { + SessionCount int `json:"sessionCount"` + MaxSession int `json:"maxSession"` + TotalSlots int `json:"totalSlots"` } -type sessionsInfo struct { - SessionQueueRequests []string `json:"sessionQueueRequests"` - Sessions []seleniumSession `json:"sessions"` +type NodesInfo struct { + Nodes Nodes `json:"nodes"` } -type seleniumSession struct { +type SessionsInfo struct { + SessionQueueRequests []string `json:"sessionQueueRequests"` +} + +type Nodes []struct { + ID string `json:"id"` + Status string `json:"status"` + SessionCount int `json:"sessionCount"` + MaxSession int `json:"maxSession"` + SlotCount int `json:"slotCount"` + Stereotypes string `json:"stereotypes"` + Sessions Sessions `json:"sessions"` +} + +type ReservedNodes struct { + ID string `json:"id"` + MaxSession int `json:"maxSession"` + SlotCount int `json:"slotCount"` +} + +type Sessions []struct { ID string `json:"id"` Capabilities string `json:"capabilities"` - NodeID string `json:"nodeId"` + Slot Slot `json:"slot"` +} + +type Slot struct { + ID string `json:"id"` + Stereotype string `json:"stereotype"` } -type capability struct { +type Capability struct { BrowserName string `json:"browserName"` BrowserVersion string `json:"browserVersion"` PlatformName string `json:"platformName"` } -type nodesInfo struct { - Nodes []nodes `json:"nodes"` -} - -type nodes struct { - Stereotypes string `json:"stereotypes"` -} - -type stereotype struct { - Slots int64 `json:"slots"` - Stereotype capability `json:"stereotype"` +type Stereotypes []struct { + Slots int `json:"slots"` + Stereotype Capability `json:"stereotype"` } const ( DefaultBrowserVersion string = "latest" - DefaultPlatformName string = "linux" ) func NewSeleniumGridScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { @@ -135,13 +148,10 @@ func parseSeleniumGridScalerMetadata(config *scalersconfig.ScalerConfig) (*selen if meta.SessionBrowserName == "" { meta.SessionBrowserName = meta.BrowserName } - if meta.SessionBrowserVersion == "" { - meta.SessionBrowserVersion = meta.BrowserVersion - } return meta, nil } -// No cleanup required for selenium grid scaler +// No cleanup required for Selenium Grid scaler func (s *seleniumGridScaler) Close(context.Context) error { if s.httpClient != nil { s.httpClient.CloseIdleConnections() @@ -176,7 +186,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) { body, err := json.Marshal(map[string]string{ - "query": "{ grid { maxSession, nodeCount }, nodesInfo { nodes { stereotypes } }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }", + "query": "{ grid { sessionCount, maxSession, totalSlots }, nodesInfo { nodes { id, status, sessionCount, maxSession, slotCount, stereotypes, sessions { id, capabilities, slot { id, stereotype } } } }, sessionsInfo { sessionQueueRequests } }", }) if err != nil { @@ -188,8 +198,11 @@ func 
(s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.L return -1, err } - // Add HTTP Auth - req.SetBasicAuth(s.metadata.Username, s.metadata.Password) + if (s.metadata.AuthType == "" || strings.EqualFold(s.metadata.AuthType, "Basic")) && s.metadata.Username != "" && s.metadata.Password != "" { + req.SetBasicAuth(s.metadata.Username, s.metadata.Password) + } else if !strings.EqualFold(s.metadata.AuthType, "Basic") && s.metadata.AccessToken != "" { + req.Header.Set("Authorization", fmt.Sprintf("%s %s", s.metadata.AuthType, s.metadata.AccessToken)) + } res, err := s.httpClient.Do(req) if err != nil { @@ -206,101 +219,165 @@ func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.L if err != nil { return -1, err } - v, err := getCountFromSeleniumResponse(b, s.metadata.BrowserName, s.metadata.BrowserVersion, s.metadata.SessionBrowserName, s.metadata.PlatformName, s.metadata.SessionsPerNode, s.metadata.SetSessionsFromHub, s.metadata.SessionBrowserVersion, logger) + v, err := getCountFromSeleniumResponse(b, s.metadata.BrowserName, s.metadata.BrowserVersion, s.metadata.SessionBrowserName, s.metadata.PlatformName, s.metadata.NodeMaxSessions, logger) if err != nil { return -1, err } return v, nil } -func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion string, sessionBrowserName string, platformName string, sessionsPerNode int64, setSessionsFromHub bool, sessionBrowserVersion string, logger logr.Logger) (int64, error) { - var count int64 - var slots int64 - var seleniumResponse = seleniumResponse{} - - if err := json.Unmarshal(b, &seleniumResponse); err != nil { - return 0, err - } - - if setSessionsFromHub { - slots = getSlotsFromSeleniumResponse(seleniumResponse, browserName, browserVersion, platformName, logger) - } - - var sessionQueueRequests = seleniumResponse.Data.SessionsInfo.SessionQueueRequests - for _, sessionQueueRequest := range sessionQueueRequests { - var capability = capability{} - if err := json.Unmarshal([]byte(sessionQueueRequest), &capability); err == nil { - if capability.BrowserName == browserName { - var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName) - if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches { - count++ - } else if len(strings.TrimSpace(capability.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && platformNameMatches { - count++ - } - } - } else { - logger.Error(err, fmt.Sprintf("Error when unmarshaling session queue requests: %s", err)) +func countMatchingSlotsStereotypes(stereotypes Stereotypes, request Capability, browserName string, browserVersion string, sessionBrowserName string, platformName string) int { + var matchingSlots int + for _, stereotype := range stereotypes { + if checkCapabilitiesMatch(stereotype.Stereotype, request, browserName, browserVersion, sessionBrowserName, platformName) { + matchingSlots += stereotype.Slots } } + return matchingSlots +} - var sessions = seleniumResponse.Data.SessionsInfo.Sessions +func countMatchingSessions(sessions Sessions, request Capability, browserName string, browserVersion string, sessionBrowserName string, platformName string, logger logr.Logger) int { + var matchingSessions int for _, session := range sessions { - var capability = capability{} + var capability = Capability{} if err := json.Unmarshal([]byte(session.Capabilities), &capability); err == nil { - var platformNameMatches = capability.PlatformName == "" || 
strings.EqualFold(capability.PlatformName, platformName) - if capability.BrowserName == sessionBrowserName { - if strings.HasPrefix(capability.BrowserVersion, sessionBrowserVersion) && platformNameMatches { - count++ - } else if browserVersion == DefaultBrowserVersion && platformNameMatches { - count++ - } + if checkCapabilitiesMatch(capability, request, browserName, browserVersion, sessionBrowserName, platformName) { + matchingSessions++ } } else { - logger.Error(err, fmt.Sprintf("Error when unmarshaling sessions info: %s", err)) + logger.Error(err, fmt.Sprintf("Error when unmarshaling session capabilities: %s", err)) } } + return matchingSessions +} - var gridMaxSession = int64(seleniumResponse.Data.Grid.MaxSession) - var gridNodeCount = int64(seleniumResponse.Data.Grid.NodeCount) +func checkCapabilitiesMatch(capability Capability, requestCapability Capability, browserName string, browserVersion string, sessionBrowserName string, platformName string) bool { + // Keep this logic aligned with DefaultSlotMatcher in Selenium Grid - SeleniumHQ/selenium/java/src/org/openqa/selenium/grid/data/DefaultSlotMatcher.java + // A browserName matches when one of the following conditions is met: + // 1. `browserName` in capability matches `browserName` or `sessionBrowserName` in scaler metadata + // 2. `browserName` in request capability is empty or not provided + var browserNameMatches = strings.EqualFold(capability.BrowserName, browserName) || strings.EqualFold(capability.BrowserName, sessionBrowserName) || + requestCapability.BrowserName == "" + // A browserVersion matches when one of the following conditions is met: + // 1. `browserVersion` in request capability is empty or not provided or `stable` + // 2. `browserVersion` in capability starts with the scaler metadata `browserVersion` + // 3. `browserVersion` in scaler metadata is `latest` + var browserVersionMatches = requestCapability.BrowserVersion == "" || requestCapability.BrowserVersion == "stable" || + strings.HasPrefix(capability.BrowserVersion, browserVersion) || browserVersion == DefaultBrowserVersion + // A platformName matches when one of the following conditions is met: + // 1. `platformName` in request capability is empty or not provided + // 2. `platformName` in capability is empty or not provided + // 3. `platformName` in capability matches the scaler metadata `platformName` + // 4.
`platformName` in scaler metadata is empty or not provided + var platformNameMatches = requestCapability.PlatformName == "" || capability.PlatformName == "" || + strings.EqualFold(capability.PlatformName, platformName) || platformName == "" + return browserNameMatches && browserVersionMatches && platformNameMatches +} - if setSessionsFromHub { - if slots == 0 { - slots = sessionsPerNode +func checkNodeReservedSlots(reservedNodes []ReservedNodes, nodeID string, availableSlots int) int { + for _, reservedNode := range reservedNodes { + if strings.EqualFold(reservedNode.ID, nodeID) { + return reservedNode.SlotCount } - var floatCount = float64(count) / float64(slots) - count = int64(math.Ceil(floatCount)) - } else if gridMaxSession > 0 && gridNodeCount > 0 { - // Get count, convert count to next highest int64 - var floatCount = float64(count) / (float64(gridMaxSession) / float64(gridNodeCount)) - count = int64(math.Ceil(floatCount)) } + return availableSlots +} - return count, nil +func updateOrAddReservedNode(reservedNodes []ReservedNodes, nodeID string, slotCount int, maxSession int) []ReservedNodes { + for i, reservedNode := range reservedNodes { + if strings.EqualFold(reservedNode.ID, nodeID) { + // Update remaining available slots for the reserved node + reservedNodes[i].SlotCount = slotCount + return reservedNodes + } + } + // Add new reserved node if not found + return append(reservedNodes, ReservedNodes{ID: nodeID, SlotCount: slotCount, MaxSession: maxSession}) } -func getSlotsFromSeleniumResponse(seleniumResponse seleniumResponse, browserName string, browserVersion string, platformName string, logger logr.Logger) int64 { - var slots int64 +func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion string, sessionBrowserName string, platformName string, nodeMaxSessions int, logger logr.Logger) (int64, error) { + // The returned count is the number of new Nodes to scale up + var count int64 + // Track the number of available slots of existing Nodes in the Grid that can be reserved for the matched requests + var availableSlots int + // Track the number of matched requests in the sessions queue that will be served by this scaler + var queueSlots int + + var seleniumResponse = SeleniumResponse{} + if err := json.Unmarshal(b, &seleniumResponse); err != nil { + return 0, err + } + var sessionQueueRequests = seleniumResponse.Data.SessionsInfo.SessionQueueRequests var nodes = seleniumResponse.Data.NodesInfo.Nodes -slots: - for _, node := range nodes { - var stereotypes = []stereotype{} - if err := json.Unmarshal([]byte(node.Stereotypes), &stereotypes); err == nil { - for _, stereotype := range stereotypes { - if stereotype.Stereotype.BrowserName == browserName { - var platformNameMatches = stereotype.Stereotype.PlatformName == "" || strings.EqualFold(stereotype.Stereotype.PlatformName, platformName) - if strings.HasPrefix(stereotype.Stereotype.BrowserVersion, browserVersion) && platformNameMatches { - slots = stereotype.Slots - break slots - } else if len(strings.TrimSpace(stereotype.Stereotype.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && platformNameMatches { - slots = stereotype.Slots - break slots - } - } + // Track the list of existing Nodes that have available slots for the matched requests + var reservedNodes []ReservedNodes + // Track the list of new Nodes that will be scaled up, each with available slots following the scaler parameter `nodeMaxSessions` + var newRequestNodes []ReservedNodes + for requestIndex, sessionQueueRequest := range sessionQueueRequests { + var
isRequestMatched bool + var requestCapability = Capability{} + if err := json.Unmarshal([]byte(sessionQueueRequest), &requestCapability); err == nil { + if checkCapabilitiesMatch(requestCapability, requestCapability, browserName, browserVersion, sessionBrowserName, platformName) { + queueSlots++ + isRequestMatched = true } } else { - logger.Error(err, fmt.Sprintf("Error when unmarshalling stereotypes: %s", err)) + logger.Error(err, fmt.Sprintf("Error when unmarshaling sessionQueueRequest capability: %s", err)) + } + + // Skip the request if the capability does not match the scaler parameters + if !isRequestMatched { + continue + } + + var isRequestReserved bool + // Check if the matched request can be assigned to available slots of existing Nodes in the Grid + for _, node := range nodes { + // Check if node is UP and has available slots (maxSession > sessionCount) + if strings.EqualFold(node.Status, "UP") && checkNodeReservedSlots(reservedNodes, node.ID, node.MaxSession-node.SessionCount) > 0 { + var stereotypes = Stereotypes{} + var availableSlotsMatch int + if err := json.Unmarshal([]byte(node.Stereotypes), &stereotypes); err == nil { + // Count available slots that match the request capability and scaler metadata + availableSlotsMatch += countMatchingSlotsStereotypes(stereotypes, requestCapability, browserName, browserVersion, sessionBrowserName, platformName) + } else { + logger.Error(err, fmt.Sprintf("Error when unmarshaling node stereotypes: %s", err)) + } + // Count ongoing sessions that match the request capability and scaler metadata + var currentSessionsMatch = countMatchingSessions(node.Sessions, requestCapability, browserName, browserVersion, sessionBrowserName, platformName, logger) + // Count remaining available slots that can be reserved for this request + var availableSlotsCanBeReserved = checkNodeReservedSlots(reservedNodes, node.ID, node.MaxSession-node.SessionCount) + // Reserve one available slot for the request if available slots match is greater than current sessions match + if availableSlotsMatch > currentSessionsMatch { + availableSlots++ + reservedNodes = updateOrAddReservedNode(reservedNodes, node.ID, availableSlotsCanBeReserved-1, node.MaxSession) + isRequestReserved = true + break + } + } + } + // Check if the matched request can be assigned to available slots of new Nodes that will be scaled up, since the scaler parameter `nodeMaxSessions` can be greater than 1 + if !isRequestReserved { + for _, newRequestNode := range newRequestNodes { + if newRequestNode.SlotCount > 0 { + newRequestNodes = updateOrAddReservedNode(newRequestNodes, newRequestNode.ID, newRequestNode.SlotCount-1, nodeMaxSessions) + isRequestReserved = true + break + } + } + } + // Check if a new Node should be scaled up to be reserved for the matched request + if !isRequestReserved { + newRequestNodes = updateOrAddReservedNode(newRequestNodes, string(rune(requestIndex)), nodeMaxSessions-1, nodeMaxSessions) } } - return slots + + if queueSlots > availableSlots { + count = int64(len(newRequestNodes)) + } else { + count = 0 + } + + return count, nil } diff --git a/pkg/scalers/selenium_grid_scaler_test.go b/pkg/scalers/selenium_grid_scaler_test.go index 56de3b7f024..b92936cbe7f 100644 --- a/pkg/scalers/selenium_grid_scaler_test.go +++ b/pkg/scalers/selenium_grid_scaler_test.go @@ -11,14 +11,12 @@ import ( func Test_getCountFromSeleniumResponse(t *testing.T) { type args struct { - b []byte - browserName string - sessionBrowserName string - browserVersion string - platformName string - sessionsPerNode int64 -
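A self-contained walk-through of the reservation bookkeeping that getCountFromSeleniumResponse introduces above (the names here are local to this sketch; the real helpers are checkNodeReservedSlots and updateOrAddReservedNode). Two matched queue requests against a single UP node with one free slot: the first request is reserved onto the node, the second forces one new Node, so the scaler reports a count of 1.

package main

import "fmt"

type reservedNode struct {
	ID         string
	SlotCount  int // remaining slots that can still be reserved
	MaxSession int
}

// A node already in the reserved list answers with its remaining slots;
// otherwise the caller's freshly computed availableSlots is used, mirroring
// checkNodeReservedSlots above.
func remainingSlots(reserved []reservedNode, nodeID string, availableSlots int) int {
	for _, r := range reserved {
		if r.ID == nodeID {
			return r.SlotCount
		}
	}
	return availableSlots
}

// Update a node's remaining slots in place, or append it, mirroring
// updateOrAddReservedNode above.
func reserve(reserved []reservedNode, nodeID string, slotCount, maxSession int) []reservedNode {
	for i, r := range reserved {
		if r.ID == nodeID {
			reserved[i].SlotCount = slotCount
			return reserved
		}
	}
	return append(reserved, reservedNode{ID: nodeID, SlotCount: slotCount, MaxSession: maxSession})
}

func main() {
	const nodeMaxSessions = 1 // scaler parameter: slots per newly scaled-up Node
	var reserved, newNodes []reservedNode

	// Request 1: node-1 reports maxSession=1, sessionCount=0 -> one free slot.
	if free := remainingSlots(reserved, "node-1", 1); free > 0 {
		reserved = reserve(reserved, "node-1", free-1, 1)
	}
	// Request 2: node-1 now reports zero remaining slots, so a new Node is planned.
	if remainingSlots(reserved, "node-1", 1) == 0 {
		newNodes = reserve(newNodes, "request-2", nodeMaxSessions-1, nodeMaxSessions)
	}
	fmt.Println("new Nodes to scale up:", len(newNodes)) // prints 1
}

With nodeMaxSessions greater than 1, the middle branch of the loop above packs further matched requests onto an already-planned new Node before adding another, so the reported count stays at the minimum number of Nodes needed.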
setSessionsFromHub bool - sessionBrowserVersion string + b []byte + browserName string + sessionBrowserName string + browserVersion string + platformName string + nodeMaxSessions int } tests := []struct { name string @@ -32,7 +30,6 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { b: []byte(nil), browserName: "", }, - // want: 0, wantErr: true, }, { @@ -41,141 +38,907 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { b: []byte(""), browserName: "", }, - // want: resource.NewQuantity(0, resource.DecimalSI), wantErr: true, }, { - name: "no active sessions should return count as 0", + name: "no sessionQueueRequests should return count as 0", args: args{ b: []byte(`{ - "data": { - "grid":{ - "maxSession": 0, - "nodeCount": 0 + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 }, "nodesInfo": { - "nodes": [] + "nodes": [] }, "sessionsInfo": { - "sessionQueueRequests": [], - "sessions": [] + "sessionQueueRequests": [] } + } } - }`), + `), browserName: "", }, want: 0, wantErr: false, }, { - name: "active sessions with no matching browsername should return count as 0", + name: "12 sessionQueueRequests with 4 requests matching browserName chrome should return count as 4", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 + }, + "nodesInfo": { + "nodes": [] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\n \"browserName\": \"chrome\",\n \"goog:chromeOptions\": {\n \"extensions\": [\n ],\n \"args\": [\n \"disable-features=DownloadBubble,DownloadBubbleV2\"\n ]\n },\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_download_file (ChromeTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_with_frames (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_download_file (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_title_and_maximize_window (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"browserName\": \"chrome\",\n \"goog:chromeOptions\": {\n \"extensions\": [\n ],\n \"args\": [\n \"disable-features=DownloadBubble,DownloadBubbleV2\"\n ]\n },\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_play_video (ChromeTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"browserName\": \"chrome\",\n \"goog:chromeOptions\": {\n \"extensions\": [\n ],\n 
\"args\": [\n \"disable-features=DownloadBubble,DownloadBubbleV2\"\n ]\n },\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_select_from_a_dropdown (ChromeTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_visit_basic_auth_secured_page (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_select_from_a_dropdown (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"browserName\": \"chrome\",\n \"goog:chromeOptions\": {\n \"extensions\": [\n ],\n \"args\": [\n \"disable-features=DownloadBubble,DownloadBubbleV2\"\n ]\n },\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_title (ChromeTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_title (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_accept_languages (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_play_video (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}" + ] + } + } + } + `), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 4, + wantErr: false, + }, + { + name: "2 sessionQueueRequests and 1 available nodeStereotypes with matching browserName firefox should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 7, + "totalSlots": 7 + }, + "nodesInfo": { + "nodes": [ + { + "id": "82ee33bd-390e-4dd6-aee2-06b17ecee18e", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": 
\"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-m5n8z-4br6x\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-m5n8z-4br6x\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "83c9d9f5-f79d-4dea-bc9b-ce61bf2bc01c", + "stereotype": "{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-m5n8z-4br6x\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "b4d3d31a-3239-4c09-a5f5-3650d4fcef48", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-s2gq6-82lwb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-s2gq6-82lwb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "b03b80c0-95f8-4b9c-ba06-bebd2568ce3d", + "stereotype": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-s2gq6-82lwb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "f3e67bf7-3c40-42d4-ab10-666b49c88925", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-xh95p-9c2cl\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [] + }, + { + "id": "f1e315fe-5f32-4a73-bb31-b73ed9a728e5", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-j2xbn-lq76c\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"chrome\",\n 
\"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-j2xbn-lq76c\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "9d91cd87-b443-4a0c-93e7-eea8c4661207", + "stereotype": "{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-j2xbn-lq76c\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "0ae48415-a230-4bc4-a26c-4fc4ffc3abc1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-xk6mm-2m6jh\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-xk6mm-2m6jh\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "2c1fc5c4-881a-48fd-9b9e-b4d3ecbc1bd8", + "stereotype": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-xk6mm-2m6jh\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "284fa982-5be0-44a6-b64e-e2e76fe52d1f", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-bvq59-6dh6q\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-bvq59-6dh6q\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "5f8f9ba0-0f61-473e-b367-b68d9368dc24", + "stereotype": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-bvq59-6dh6q\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "451442d0-3649-4b21-a5a5-32bc847f1765", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n 
\"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-42xbf-zpdd4\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [] + }, + { + "id": "a4d26330-e5be-4630-b4da-9078f2495ece", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-qt9z2-6xx86\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-qt9z2-6xx86\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "38bd0b09-ffe0-46e9-8983-bd208270c8da", + "stereotype": "{\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-qt9z2-6xx86\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + }, + { + "id": "e81f0038-fc72-4045-9de1-b98143053eae", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-v7nrv-xsfkb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "reserved", + "capabilities": "{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-v7nrv-xsfkb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}", + "slot": { + "id": "43b992cc-39bb-4b0f-92b6-99603a543459", + "stereotype": "{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-v7nrv-xsfkb\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n}" + } + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_accept_languages (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n 
\"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_play_video (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}" + ] + } + } + } + `), + browserName: "firefox", + sessionBrowserName: "firefox", + browserVersion: "latest", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "1 sessionQueueRequests and 1 available nodeStereotypes with matching browserName chrome should return count as 0", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 + }, + "nodesInfo": { + "nodes": [ + { + "id": "f3e67bf7-3c40-42d4-ab10-666b49c88925", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-chrome-name-xh95p-9c2cl\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [] + }, + { + "id": "451442d0-3649-4b21-a5a5-32bc847f1765", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[\n {\n \"slots\": 1,\n \"stereotype\": {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"130.0\",\n \"moz:firefoxOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002ffirefox\"\n },\n \"platformName\": \"linux\",\n \"se:containerName\": \"my-firefox-name-42xbf-zpdd4\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\n \"acceptInsecureCerts\": true,\n \"browserName\": \"firefox\",\n \"moz:debuggerAddress\": true,\n \"moz:firefoxOptions\": {\n \"prefs\": {\n \"remote.active-protocols\": 3\n },\n \"profile\": \"profile\"\n },\n \"pageLoadStrategy\": \"normal\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_accept_languages (FirefoxTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}", + "{\n \"browserName\": \"chrome\",\n \"goog:chromeOptions\": {\n \"extensions\": [\n ],\n \"args\": [\n \"disable-features=DownloadBubble,DownloadBubbleV2\"\n ]\n },\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"se:downloadsEnabled\": true,\n \"se:name\": \"test_visit_basic_auth_secured_page (ChromeTests)\",\n \"se:recordVideo\": true,\n \"se:screenResolution\": \"1920x1080\"\n}" + ] + } + } + } + `), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 0, + wantErr: false, + }, + { + name: "1 sessionQueueRequests Linux and 1 available nodeStereotypes Windows with matching browserName chrome should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 2, + "totalSlots": 2 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [] + }, + { 
+ "id": "node-2", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"firefox\", \"browserVersion\": \"130.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"firefox\", \"browserVersion\": \"130.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}" + ] + } + } + } + `), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "scaler browserVersion is latest, 2 sessionQueueRequests wihtout browserVersion, 2 available nodeStereotypes with different versions and platforms, should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"92.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "scaler browserVersion is latest, 5 sessionQueueRequests wihtout browserVersion also 1 different platformName, 1 available nodeStereotypes with 3 slots Linux and 1 node Windows, should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 6, + "totalSlots": 6 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 3, + "slotCount": 3, + "stereotypes": "[{\"slots\": 3, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 0, + "maxSession": 3, + "slotCount": 3, + "stereotypes": "[{\"slots\": 3, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"92.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"Windows 11\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "queue request with browserName browserVersion and browserVersion but no available nodes should return count as 1", args: args{ b: []byte(`{ "data": { - "grid":{ + "grid": { + "sessionCount": 1, 
"maxSession": 1, - "nodeCount": 1 + "totalSlots": 1 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"firefox\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"firefox\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "1 queue request with browserName browserVersion and browserVersion but 2 nodes without available slots should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + }, { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "id": "node-2", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] } } }`), - browserName: "", - sessionBrowserName: "", - browserVersion: "latest", + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0", platformName: "linux", }, - want: 0, + want: 1, wantErr: false, }, { - 
name: "active sessions with matching browsername should return count as 2", + name: "2 session queue with matching browsername and browserversion of 2 available slots should return count as 0", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 0, + "maxSession": 2, + "totalSlots": 2 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [] + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", - browserVersion: "latest", + browserVersion: "91.0", platformName: "linux", }, - want: 2, + want: 0, wantErr: false, }, { - name: "2 session queue with matching browsername and browserversion should return count as 1", + name: "2 queue requests with browserName browserVersion and platformName matching 2 available slots on 2 different nodes should return count as 0", args: args{ b: []byte(`{ "data": { - "grid":{ + "grid": { + "sessionCount": 2, "maxSession": 4, - "nodeCount": 2 + "totalSlots": 4 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 2, + "slotCount": 2, + "stereotypes": "[{\"slots\": 2, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 1, + "maxSession": 2, + "slotCount": 2, + "stereotypes": "[{\"slots\": 2, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"] + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", - browserVersion: "latest", + browserVersion: "91.0", platformName: "linux", }, - want: 1, + want: 0, wantErr: false, }, { - name: "2 active sessions with matching browsername on 2 nodes and maxSession=4 should return count as 1 (rounded up from 0.75)", + name: "1 queue request with browserName browserVersion and platformName matching 1 available slot on node has 3 max sessions 
should return count as 0", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 4, - "nodeCount": 1 + "grid": { + "sessionCount": 2, + "maxSession": 3, + "totalSlots": 3 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 2, + "maxSession": 3, + "slotCount": 3, + "stereotypes": "[{\"slots\": 3, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + }, + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0", + platformName: "linux", + }, + want: 0, + wantErr: false, + }, + { + name: "3 queue requests with browserName browserVersion and platformName but 2 running nodes are busy should return count as 3", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0", + platformName: "linux", + }, + want: 3, + wantErr: false, + }, + { + name: "3 queue requests with browserName browserVersion and platformName but 2 running nodes are busy with different versions should return count as 3", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 + }, + "nodesInfo": { + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n 
},\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] }, { - "id": "0f9c5a941aa4d755a54b84be1f6535b2", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "id": "node-2", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"90.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"92.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"93.0\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 3, + wantErr: false, + }, + { + name: "3 queue requests with browserName and platformName but 2 running nodes are busy with different versions should return count as 3", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + 
"maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + }, + { + "id": "node-2", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-2", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 3, + wantErr: false, + }, + { + name: "1 active session with matching browsername and version should return count as 2", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 1, + "maxSession": 1, + "totalSlots": 1 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] } } }`), @@ -184,109 +947,293 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { browserVersion: "91.0", platformName: "linux", }, + want: 2, + wantErr: false, + }, + { + name: "1 request without browserName and browserVersion stable can be match any available node should return count as 0", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 1, + "totalSlots": 1 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"v128.0\", \"platformName\": \"linux\"}}]", + "sessions": [] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserVersion\": \"stable\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, + want: 0, + wantErr: false, + }, + { + name: "1 request without browserName and browserVersion stable should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 1, + "maxSession": 1, + "totalSlots": 1 + }, + "nodesInfo": { + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"v128.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": 
"{\"browserName\": \"chrome\", \"browserVersion\": \"v128.0\", \"platformName\": \"linux\"}" + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserVersion\": \"stable\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "linux", + }, want: 1, wantErr: false, }, { - name: "2 active sessions with matching browsername on 1 node and maxSession=3 should return count as 1 (rounded up from 0.33)", + name: "2 queue requests with browserName in string match node stereotype and scaler metadata browserVersion should return count as 1", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 3, - "nodeCount": 1 + "grid": { + "sessionCount": 1, + "maxSession": 1, + "totalSlots": 1 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"msedge\", \"browserVersion\": \"dev\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"msedge\", \"browserVersion\": \"dev\", \"platformName\": \"linux\"}" + } + ] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ + "sessionQueueRequests": [ + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"beta\", \"platformName\": \"linux\"}", + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"dev\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "MicrosoftEdge", + sessionBrowserName: "msedge", + browserVersion: "dev", + platformName: "linux", + }, + want: 1, + wantErr: false, + }, + { + name: "2 queue requests with matching browsername/sessionBrowserName but 1 node is busy should return count as 2", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 1, + "maxSession": 1, + "totalSlots": 1 + }, + "nodesInfo": { + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": 
"{\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] + } + ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] + } + } + }`), + browserName: "MicrosoftEdge", + sessionBrowserName: "msedge", + browserVersion: "91.0", + platformName: "linux", + }, + want: 2, + wantErr: false, + }, + { + name: "2 queue requests with matching browsername/sessionBrowserName and 1 node is is available should return count as 1", + args: args{ + b: []byte(`{ + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 1, + "totalSlots": 1 + }, + "nodesInfo": { + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b2", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}", + "{\"browserName\": \"MicrosoftEdge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + ] } } }`), - browserName: "chrome", - sessionBrowserName: "chrome", - browserVersion: "latest", + browserName: "MicrosoftEdge", + sessionBrowserName: "msedge", + browserVersion: "91.0", platformName: "linux", }, want: 1, wantErr: false, - }, - { - name: "2 active sessions with matching browsername on 2 nodes should return count as 5", + }, { + name: "2 queue requests with platformName and without platformName and node with 1 slot available should return count as 1", args: args{ b: []byte(`{ "data": { - "grid":{ + "grid": { + "sessionCount": 1, "maxSession": 2, - "nodeCount": 2 + "totalSlots": 2 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n 
\"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b2", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 2, + "slotCount": 2, + "stereotypes": "[{\"slots\": 2, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"Windows 11\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"Windows 11\"}" + ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", browserVersion: "91.0", - platformName: "linux", + platformName: "Windows 11", }, - want: 5, + want: 1, wantErr: false, }, { - name: "2 active sessions with matching browsername on 2 nodes with 3 other versions in queue should return count as 2 with default browserVersion and PlatformName", + name: "1 active msedge session while asking for 2 chrome sessions should return a count of 2", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 2, - "nodeCount": 2 + "grid": { + "sessionCount": 1, + "maxSession": 1, + "totalSlots": 1 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\",\n 
\"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b2", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] } } }`), @@ -299,31 +1246,39 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { wantErr: false, }, { - name: "2 active sessions with matching browsername on 2 nodes should return count as 5 with default browserVersion / PlatformName and incoming sessions do not have versions", + name: "3 queue requests browserName chrome platformName linux but 1 node has maxSessions=3 with browserName msedge should return a count of 3", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 2, - "nodeCount": 2 + "grid": { + "sessionCount": 
1, + "maxSession": 3, + "totalSlots": 3 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"}","{\n \"browserName\": \"chrome\"}","{\n \"browserName\": \"chrome\"}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b2", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "id": "node-1", + "status": "UP", + "sessionCount": 1, + "maxSession": 3, + "slotCount": 3, + "stereotypes": "[{\"slots\": 3, \"stereotype\": {\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}}]", + "sessions": [ + { + "id": "session-1", + "capabilities": "{\"browserName\": \"msedge\", \"browserVersion\": \"91.0\", \"platformName\": \"linux\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] } } }`), @@ -332,208 +1287,274 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { browserVersion: "latest", platformName: "linux", }, - want: 5, + want: 3, wantErr: false, }, { - name: "1 active session with matching browsername and version should return count as 2", + name: "session request with matching browsername and no specific 
platformName should return count as 2", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "maxSession": 0, + "nodeCount": 0, + "totalSlots": 0 }, "nodesInfo": { "nodes": [] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - } + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"Windows 11\"}" ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", - browserVersion: "91.0", - platformName: "linux", + browserVersion: "latest", + platformName: "", }, want: 2, wantErr: false, }, { - name: "1 active msedge session with matching browsername/sessionBrowserName should return count as 3", + name: "2 queue requests with 1 matching browsername and platformName and 1 existing slot is available should return count as 0", args: args{ b: []byte(`{ "data": { - "grid":{ + "grid": { + "sessionCount": 0, "maxSession": 1, - "nodeCount": 1 + "totalSlots": 1 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"MicrosoftEdge\",\n \"browserVersion\": \"91.0\"\n}","{\n \"browserName\": \"MicrosoftEdge\",\n \"browserVersion\": \"91.0\"\n}"], - "sessions": [ + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n 
\"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "id": "node-1", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + "stereotypes": "[{\"slots\": 1, \"stereotype\": {\"browserName\": \"chrome\", \"browserVersion\": \"91.0\", \"platformName\": \"Windows 11\"}}]", + "sessions": [] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"Windows 11\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] } } }`), - browserName: "MicrosoftEdge", - sessionBrowserName: "msedge", - browserVersion: "91.0", - platformName: "linux", + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "latest", + platformName: "Windows 11", }, - want: 3, + want: 0, wantErr: false, }, { - name: "1 active msedge session while asking for 2 chrome sessions should return a count of 2", + name: "2 queue requests with 1 request matching browserName and platformName but 1 existing node is busy should return count as 1", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "id": "82ee33bd-390e-4dd6-aee2-06b17ecee18e", + "status": "UP", + "sessionCount": 2, + "maxSession": 2, + "slotCount": 2, + "stereotypes": "[\n {\n \"slots\": 2,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"128.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"se:containerName\": \"my-chrome-name-m5n8z-4br6x\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\"browserName\": \"chrome\", \"platformName\": \"Windows 11\", \"browserVersion\": \"91.0\"}" + }, + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\"browserName\": \"chrome\", \"platformName\": \"linux\", \"browserVersion\": \"91.0\"}" + } + ] } ] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": 
\"Windows 11\"}" + ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", - browserVersion: "latest", - platformName: "linux", + browserVersion: "91.0", + platformName: "Windows 11", }, - want: 2, + want: 1, wantErr: false, }, { - name: "1 active msedge session with maxSessions=3 while asking for 3 chrome sessions should return a count of 1", + name: "5 queue requests with scaler parameter nodeMaxSessions is 2 should return count as 3", args: args{ b: []byte(`{ - "data": { - "grid":{ - "maxSession": 3, - "nodeCount": 1 - }, - "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - } - ] - } + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 + }, + "nodesInfo": { + "nodes": [] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}" + ] } - }`), + } + } + `), browserName: "chrome", sessionBrowserName: "chrome", browserVersion: "latest", platformName: "linux", + nodeMaxSessions: 2, }, - want: 1, + want: 3, wantErr: false, }, { - name: "session request with matching browsername and no specific platformName should return count as 2", + name: "5 queue requests with scaler parameter nodeMaxSessions is 3 should return count as 2", args: args{ b: []byte(`{ - "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 - }, - "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], - "sessions": [] - } + "data": { + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 + }, + "nodesInfo": { + "nodes": [] + }, + "sessionsInfo": { + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", 
\"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}", + "{\"browserName\": \"chrome\", \"browserVersion\": \"128.0\", \"platformName\": \"Linux\"}" + ] } - }`), + } + } + `), browserName: "chrome", sessionBrowserName: "chrome", browserVersion: "latest", - platformName: "Windows 11", + platformName: "linux", + nodeMaxSessions: 3, }, want: 2, wantErr: false, }, { - name: "sessions requests with matching browsername and platformName should return count as 1", + name: "5 queue requests with request matching browserName and platformName and scaler param nodeMaxSessions is 3 and existing node with 1 available slot should return count as 2", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 2, + "maxSession": 3, + "totalSlots": 3 }, "nodesInfo": { - "nodes": [] + "nodes": [ + { + "id": "82ee33bd-390e-4dd6-aee2-06b17ecee18e", + "status": "UP", + "sessionCount": 2, + "maxSession": 3, + "slotCount": 3, + "stereotypes": "[\n {\n \"slots\": 3,\n \"stereotype\": {\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\",\n \"browserVersion\": \"91.0\",\n \"goog:chromeOptions\": {\n \"binary\": \"\\u002fusr\\u002fbin\\u002fchromium\"\n },\n \"se:containerName\": \"my-chrome-name-m5n8z-4br6x\",\n \"se:downloadsEnabled\": true,\n \"se:noVncPort\": 7900,\n \"se:vncEnabled\": true\n }\n }\n]", + "sessions": [ + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\"browserName\": \"chrome\", \"platformName\": \"Linux\", \"browserVersion\": \"91.0\"}" + }, + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\"browserName\": \"chrome\", \"platformName\": \"linux\", \"browserVersion\": \"91.0\"}" + } + ] + } + ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], - "sessions": [] + "sessionQueueRequests": [ + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}", + "{\"browserName\": \"chrome\", \"platformName\": \"linux\"}" + ] } } }`), browserName: "chrome", sessionBrowserName: "chrome", - browserVersion: "latest", - platformName: "Windows 11", + browserVersion: "91.0", + platformName: "linux", + nodeMaxSessions: 3, }, - want: 1, + want: 2, wantErr: false, }, + // Tests from PR: https://github.com/kedacore/keda/pull/6055 { - name: "sessions requests with matching browsername and platformName when setSessionsFromHub turned on and node with 2 slots matches should return count as 1", + name: "sessions requests with matching browsername and platformName when setSessionsFromHub turned on and node with 1 slots matches should return count as 0", args: args{ b: []byte(`{ "data": { - "grid":{ + "grid": { + "sessionCount": 0, "maxSession": 1, - "nodeCount": 1 + "totalSlots": 1 }, "nodesInfo": { "nodes": [ { - "stereotypes":"[{\"slots\":1,\"stereotype\":{\"browserName\":\"chrome\",\"platformName\":\"linux\"}}]" + "id": "82ee33bd-390e-4dd6-aee2-06b17ecee18e", + "status": "UP", + "sessionCount": 0, + "maxSession": 1, + "slotCount": 1, + 
"stereotypes":"[{\"slots\":1,\"stereotype\":{\"browserName\":\"chrome\",\"platformName\":\"linux\"}}]", + "sessions": [] } ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], - "sessions": [] + "sessionQueueRequests": [ + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}" + ] } } }`), @@ -541,9 +1562,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { sessionBrowserName: "chrome", browserVersion: "latest", platformName: "linux", - setSessionsFromHub: true, }, - want: 1, + want: 0, wantErr: false, }, { @@ -551,20 +1571,32 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 0, + "maxSession": 2, + "totalSlots": 2 }, "nodesInfo": { "nodes": [ { - "stereotypes":"[{\"slots\":2,\"stereotype\":{\"browserName\":\"chrome\",\"platformName\":\"linux\"}}]" + "id": "82ee33bd-390e-4dd6-aee2-06b17ecee18e", + "status": "UP", + "sessionCount": 0, + "maxSession": 2, + "slotCount": 2, + "stereotypes":"[{\"slots\":2,\"stereotype\":{\"browserName\":\"chrome\",\"platformName\":\"linux\"}}]", + "sessions": [ + ] } ] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], - "sessions": [] + "sessionQueueRequests": [ + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"] } } }`), @@ -572,7 +1604,6 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { sessionBrowserName: "chrome", browserVersion: "latest", platformName: "linux", - setSessionsFromHub: true, }, want: 2, wantErr: false, @@ -582,16 +1613,21 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 0, + "maxSession": 0, + "totalSlots": 0 }, "nodesInfo": { "nodes": [] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], - "sessions": [] + "sessionQueueRequests": [ + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"] } } }`), @@ -599,90 +1635,65 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { 
sessionBrowserName: "chrome", browserVersion: "latest", platformName: "linux", - setSessionsFromHub: true, - sessionsPerNode: 2, + nodeMaxSessions: 2, }, want: 2, wantErr: false, }, { - name: "sessions requests and active sessions with matching browsername and platformName should return count as 2", + name: "sessions requests and active sessions with 1 matching browsername, platformName and sessionBrowserVersion should return count as 1", args: args{ b: []byte(`{ "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 + "grid": { + "sessionCount": 2, + "maxSession": 2, + "totalSlots": 2 }, "nodesInfo": { - "nodes": [] - }, - "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\",\n \"browserVersion\": \"91.0\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"Windows 11\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, + "nodes": [ { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "id": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "status": "UP", + "sessionCount": 2, + "maxSession": 2, + "slotCount": 2, + "stereotypes":"[{\"slots\":2,\"stereotype\":{\"browserName\":\"chrome\",\"platformName\":\"linux\"}}]", + "sessions": [ + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": 
\"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + }, + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + } + ] } ] - } - } - }`), - browserName: "chrome", - sessionBrowserName: "chrome", - browserVersion: "91.0", - platformName: "Windows 11", - }, - want: 2, - wantErr: false, - }, - { - name: "sessions requests and active sessions with matching browsername, platformName and sessionBrowserVersion should return count as 3", - args: args{ - b: []byte(`{ - "data": { - "grid":{ - "maxSession": 1, - "nodeCount": 1 - }, - "nodesInfo": { - "nodes": [] }, "sessionsInfo": { - "sessionQueueRequests": ["{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\",\n \"browserVersion\": \"91.0\"\n}"], - "sessions": [ - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n 
\"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - }, - { - "id": "0f9c5a941aa4d755a54b84be1f6535b1", - "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" - } + "sessionQueueRequests": [ + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"linux\"\n}", + "{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\",\n \"browserVersion\": \"91.0\"\n}" ] } } }`), - browserName: "chrome", - sessionBrowserName: "chrome", - sessionBrowserVersion: "91.0.4472.114", - platformName: "linux", + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0.4472.114", + platformName: "linux", }, - want: 3, + want: 1, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := getCountFromSeleniumResponse(tt.args.b, tt.args.browserName, tt.args.browserVersion, tt.args.sessionBrowserName, tt.args.platformName, tt.args.sessionsPerNode, tt.args.setSessionsFromHub, tt.args.sessionBrowserVersion, logr.Discard()) + got, err := getCountFromSeleniumResponse(tt.args.b, tt.args.browserName, tt.args.browserVersion, tt.args.sessionBrowserName, tt.args.platformName, tt.args.nodeMaxSessions, logr.Discard()) if (err != nil) != tt.wantErr { t.Errorf("getCountFromSeleniumResponse() error = %v, wantErr %v", err, tt.wantErr) return @@ -736,14 +1747,13 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "chrome", - SessionBrowserName: "chrome", - TargetValue: 1, - BrowserVersion: "latest", - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "latest", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + BrowserVersion: "latest", + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -759,14 +1769,13 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "MicrosoftEdge", - SessionBrowserName: "msedge", - 
TargetValue: 1, - BrowserVersion: "latest", - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "latest", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "MicrosoftEdge", + SessionBrowserName: "msedge", + TargetValue: 1, + BrowserVersion: "latest", + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -774,7 +1783,9 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { args: args{ config: &scalersconfig.ScalerConfig{ AuthParams: map[string]string{ - "url": "http://user:password@selenium-hub:4444/graphql", + "url": "http://selenium-hub:4444/graphql", + "username": "user", + "password": "password", }, TriggerMetadata: map[string]string{ "browserName": "MicrosoftEdge", @@ -784,14 +1795,15 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://user:password@selenium-hub:4444/graphql", - BrowserName: "MicrosoftEdge", - SessionBrowserName: "msedge", - TargetValue: 1, - BrowserVersion: "latest", - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "latest", + URL: "http://selenium-hub:4444/graphql", + Username: "user", + Password: "password", + BrowserName: "MicrosoftEdge", + SessionBrowserName: "msedge", + TargetValue: 1, + BrowserVersion: "latest", + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -811,16 +1823,15 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "MicrosoftEdge", - SessionBrowserName: "msedge", - TargetValue: 1, - BrowserVersion: "latest", - PlatformName: "linux", - Username: "username", - Password: "password", - SessionsPerNode: 1, - SessionBrowserVersion: "latest", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "MicrosoftEdge", + SessionBrowserName: "msedge", + TargetValue: 1, + BrowserVersion: "latest", + PlatformName: "linux", + Username: "username", + Password: "password", + NodeMaxSessions: 1, }, }, { @@ -837,15 +1848,14 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "chrome", - SessionBrowserName: "chrome", - TargetValue: 1, - BrowserVersion: "91.0", - UnsafeSsl: false, - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "91.0", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + BrowserVersion: "91.0", + UnsafeSsl: false, + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -863,16 +1873,15 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "chrome", - SessionBrowserName: "chrome", - TargetValue: 1, - ActivationThreshold: 10, - BrowserVersion: "91.0", - UnsafeSsl: true, - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "91.0", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -905,16 +1914,15 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "chrome", - SessionBrowserName: "chrome", - TargetValue: 1, - ActivationThreshold: 10, 
- BrowserVersion: "91.0", - UnsafeSsl: true, - PlatformName: "linux", - SessionsPerNode: 1, - SessionBrowserVersion: "91.0", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "linux", + NodeMaxSessions: 1, }, }, { @@ -933,16 +1941,153 @@ func Test_parseSeleniumGridScalerMetadata(t *testing.T) { }, wantErr: false, want: &seleniumGridScalerMetadata{ - URL: "http://selenium-hub:4444/graphql", - BrowserName: "chrome", - SessionBrowserName: "chrome", - TargetValue: 1, - ActivationThreshold: 10, - BrowserVersion: "91.0", - UnsafeSsl: true, - PlatformName: "Windows 11", - SessionsPerNode: 1, - SessionBrowserVersion: "91.0", + URL: "http://selenium-hub:4444/graphql", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "Windows 11", + NodeMaxSessions: 1, + }, + }, + { + name: "valid url, browsername, unsafeSsl, activationThreshold, nodeMaxSessions and platformName with trigger auth params should return metadata", + args: args{ + config: &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "url": "http://selenium-hub:4444/graphql", + "browserName": "chrome", + "browserVersion": "91.0", + "unsafeSsl": "true", + "activationThreshold": "10", + "platformName": "Windows 11", + "nodeMaxSessions": "3", + }, + AuthParams: map[string]string{ + "username": "user", + "password": "password", + }, + }, + }, + wantErr: false, + want: &seleniumGridScalerMetadata{ + URL: "http://selenium-hub:4444/graphql", + Username: "user", + Password: "password", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "Windows 11", + NodeMaxSessions: 3, + }, + }, + { + name: "url in trigger auth param takes precedence over url in trigger metadata", + args: args{ + config: &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "url": "http://invalid.dns:4444/graphql", + "browserName": "chrome", + "browserVersion": "91.0", + "unsafeSsl": "true", + "activationThreshold": "10", + "platformName": "Windows 11", + "nodeMaxSessions": "3", + }, + AuthParams: map[string]string{ + "url": "http://selenium-hub:4444/graphql", + "username": "user", + "password": "password", + }, + }, + }, + wantErr: false, + want: &seleniumGridScalerMetadata{ + URL: "http://selenium-hub:4444/graphql", + Username: "user", + Password: "password", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "Windows 11", + NodeMaxSessions: 3, + }, + }, + { + name: "auth type is not Basic and access token is provided", + args: args{ + config: &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "url": "http://selenium-hub:4444/graphql", + "browserName": "chrome", + "browserVersion": "91.0", + "unsafeSsl": "true", + "activationThreshold": "10", + "platformName": "Windows 11", + "nodeMaxSessions": "3", + }, + AuthParams: map[string]string{ + "url": "http://selenium-hub:4444/graphql", + "authType": "OAuth2", + "accessToken": "my-access-token", + }, + }, + }, + wantErr: false, + want: &seleniumGridScalerMetadata{ + URL: "http://selenium-hub:4444/graphql", + AuthType: "OAuth2", + AccessToken: "my-access-token", + BrowserName: "chrome", + SessionBrowserName: 
"chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "Windows 11", + NodeMaxSessions: 3, + }, + }, + { + name: "authenticating with bearer access token", + args: args{ + config: &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "browserName": "chrome", + "browserVersion": "91.0", + "unsafeSsl": "true", + "activationThreshold": "10", + "platformName": "Windows 11", + "nodeMaxSessions": "3", + }, + AuthParams: map[string]string{ + "url": "http://selenium-hub:4444/graphql", + "authType": "Bearer", + "accessToken": "my-access-token", + }, + }, + }, + wantErr: false, + want: &seleniumGridScalerMetadata{ + URL: "http://selenium-hub:4444/graphql", + AuthType: "Bearer", + AccessToken: "my-access-token", + BrowserName: "chrome", + SessionBrowserName: "chrome", + TargetValue: 1, + ActivationThreshold: 10, + BrowserVersion: "91.0", + UnsafeSsl: true, + PlatformName: "Windows 11", + NodeMaxSessions: 3, }, }, } diff --git a/pkg/scaling/cache/scalers_cache.go b/pkg/scaling/cache/scalers_cache.go index acd253804c4..332a419b504 100644 --- a/pkg/scaling/cache/scalers_cache.go +++ b/pkg/scaling/cache/scalers_cache.go @@ -19,6 +19,7 @@ package cache import ( "context" "fmt" + "sync" "time" "github.com/expr-lang/expr/vm" @@ -40,6 +41,7 @@ type ScalersCache struct { ScalableObjectGeneration int64 Recorder record.EventRecorder CompiledFormula *vm.Program + mutex sync.RWMutex } type ScalerBuilder struct { @@ -50,6 +52,9 @@ type ScalerBuilder struct { // GetScalers returns array of scalers and scaler config stored in the cache func (c *ScalersCache) GetScalers() ([]scalers.Scaler, []scalersconfig.ScalerConfig) { + c.mutex.RLock() + defer c.mutex.RUnlock() + scalersList := make([]scalers.Scaler, 0, len(c.Scalers)) configsList := make([]scalersconfig.ScalerConfig, 0, len(c.Scalers)) for _, s := range c.Scalers { @@ -60,6 +65,17 @@ func (c *ScalersCache) GetScalers() ([]scalers.Scaler, []scalersconfig.ScalerCon return scalersList, configsList } +// getScalerBuilder returns a ScalerBuilder stored in the cache +func (c *ScalersCache) getScalerBuilder(index int) (ScalerBuilder, error) { + if index < 0 || index >= len(c.Scalers) { + return ScalerBuilder{}, fmt.Errorf("scaler with id %d not found. Len = %d", index, len(c.Scalers)) + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.Scalers[index], nil +} + // GetPushScalers returns array of push scalers stored in the cache func (c *ScalersCache) GetPushScalers() []scalers.PushScaler { var result []scalers.PushScaler @@ -73,8 +89,10 @@ func (c *ScalersCache) GetPushScalers() []scalers.PushScaler { // Close closes all scalers in the cache func (c *ScalersCache) Close(ctx context.Context) { + c.mutex.Lock() scalers := c.Scalers c.Scalers = nil + c.mutex.Unlock() for _, s := range scalers { err := s.Scaler.Close(ctx) if err != nil { @@ -85,6 +103,8 @@ func (c *ScalersCache) Close(ctx context.Context) { // GetMetricSpecForScaling returns metrics specs for all scalers in the cache func (c *ScalersCache) GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec { + c.mutex.RLock() + defer c.mutex.RUnlock() var spec []v2.MetricSpec for _, s := range c.Scalers { spec = append(spec, s.Scaler.GetMetricSpecForScaling(ctx)...) 
@@ -96,12 +116,12 @@ func (c *ScalersCache) GetMetricSpecForScaling(ctx context.Context) []v2.MetricS func (c *ScalersCache) GetMetricSpecForScalingForScaler(ctx context.Context, index int) ([]v2.MetricSpec, error) { var err error - scalersList, _ := c.GetScalers() - if index < 0 || index >= len(scalersList) { - return nil, fmt.Errorf("scaler with id %d not found. Len = %d", index, len(c.Scalers)) + sb, err := c.getScalerBuilder(index) + if err != nil { + return nil, err } - metricSpecs := scalersList[index].GetMetricSpecForScaling(ctx) + metricSpecs := sb.Scaler.GetMetricSpecForScaling(ctx) // no metric spec returned for a scaler -> this could signal error during connection to the scaler // usually in case this is an external scaler @@ -123,11 +143,12 @@ func (c *ScalersCache) GetMetricSpecForScalingForScaler(ctx context.Context, ind // GetMetricsAndActivityForScaler returns metric value, activity and latency for a scaler identified by the metric name // and by the input index (from the list of scalers in this ScaledObject) func (c *ScalersCache) GetMetricsAndActivityForScaler(ctx context.Context, index int, metricName string) ([]external_metrics.ExternalMetricValue, bool, time.Duration, error) { - if index < 0 || index >= len(c.Scalers) { - return nil, false, -1, fmt.Errorf("scaler with id %d not found. Len = %d", index, len(c.Scalers)) + sb, err := c.getScalerBuilder(index) + if err != nil { + return nil, false, -1, err } startTime := time.Now() - metric, activity, err := c.Scalers[index].Scaler.GetMetricsAndActivity(ctx, metricName) + metric, activity, err := sb.Scaler.GetMetricsAndActivity(ctx, metricName) if err == nil { return metric, activity, time.Since(startTime), nil } @@ -141,26 +162,31 @@ func (c *ScalersCache) GetMetricsAndActivityForScaler(ctx context.Context, index return metric, activity, time.Since(startTime), err } -func (c *ScalersCache) refreshScaler(ctx context.Context, id int) (scalers.Scaler, error) { - if id < 0 || id >= len(c.Scalers) { - return nil, fmt.Errorf("scaler with id %d not found, len = %d, cache has been probably already invalidated", id, len(c.Scalers)) +func (c *ScalersCache) refreshScaler(ctx context.Context, index int) (scalers.Scaler, error) { + oldSb, err := c.getScalerBuilder(index) + if err != nil { + return nil, err } - sb := c.Scalers[id] - defer sb.Scaler.Close(ctx) - ns, sConfig, err := sb.Factory() + c.mutex.Lock() + defer c.mutex.Unlock() + + newScaler, sConfig, err := oldSb.Factory() if err != nil { return nil, err } - if id < 0 || id >= len(c.Scalers) { - return nil, fmt.Errorf("scaler with id %d not found, len = %d, cache has been probably already invalidated", id, len(c.Scalers)) + if index < 0 || index >= len(c.Scalers) { + return nil, fmt.Errorf("scaler with id %d not found. 
Len = %d", index, len(c.Scalers)) } - c.Scalers[id] = ScalerBuilder{ - Scaler: ns, + + c.Scalers[index] = ScalerBuilder{ + Scaler: newScaler, ScalerConfig: *sConfig, - Factory: sb.Factory, + Factory: oldSb.Factory, } - return ns, nil + oldSb.Scaler.Close(ctx) + + return newScaler, nil } diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 31fac07949b..9c44ac5004e 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -152,6 +152,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, return scalers.NewAzureQueueScaler(config) case "azure-servicebus": return scalers.NewAzureServiceBusScaler(ctx, config) + case "beanstalkd": + return scalers.NewBeanstalkdScaler(config) case "cassandra": return scalers.NewCassandraScaler(config) case "couchdb": diff --git a/pkg/status/status.go b/pkg/status/status.go index 8c42190d5f7..bbd1c00dbaa 100755 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -55,19 +55,35 @@ func SetStatusConditions(ctx context.Context, client runtimeclient.StatusClient, // UpdateScaledObjectStatus patches the given ScaledObject with the updated status passed to it or returns an error. func UpdateScaledObjectStatus(ctx context.Context, client runtimeclient.StatusClient, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, status *kedav1alpha1.ScaledObjectStatus) error { + return updateObjectStatus(ctx, client, logger, scaledObject, status) +} + +// UpdateScaledJobStatus patches the given ScaledObject with the updated status passed to it or returns an error. +func UpdateScaledJobStatus(ctx context.Context, client runtimeclient.StatusClient, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob, status *kedav1alpha1.ScaledJobStatus) error { + return updateObjectStatus(ctx, client, logger, scaledJob, status) +} + +// updateObjectStatus patches the given ScaledObject with the updated status passed to it or returns an error. +func updateObjectStatus(ctx context.Context, client runtimeclient.StatusClient, logger logr.Logger, object interface{}, status interface{}) error { transform := func(runtimeObj runtimeclient.Object, target interface{}) error { - status, ok := target.(*kedav1alpha1.ScaledObjectStatus) - if !ok { - return fmt.Errorf("transform target is not kedav1alpha1.ScaledObjectStatus type %v", target) - } switch obj := runtimeObj.(type) { case *kedav1alpha1.ScaledObject: + status, ok := target.(*kedav1alpha1.ScaledObjectStatus) + if !ok { + return fmt.Errorf("transform target is not kedav1alpha1.ScaledObjectStatus type %v", target) + } + obj.Status = *status + case *kedav1alpha1.ScaledJob: + status, ok := target.(*kedav1alpha1.ScaledJobStatus) + if !ok { + return fmt.Errorf("transform target is not kedav1alpha1.ScaledJobStatus type %v", target) + } obj.Status = *status default: } return nil } - return TransformObject(ctx, client, logger, scaledObject, status, transform) + return TransformObject(ctx, client, logger, object, status, transform) } // getTriggerAuth returns TriggerAuthentication/ClusterTriggerAuthentication object and its status from AuthenticationRef or returns an error. 
diff --git a/tests/helper/helper.go b/tests/helper/helper.go index a21adb0c48f..c0262fa3674 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -253,6 +253,7 @@ func DeleteNamespace(t *testing.T, nsName string) { err = nil } assert.NoErrorf(t, err, "cannot delete kubernetes namespace - %s", err) + DeletePodsInNamespace(t, nsName) } func WaitForJobSuccess(t *testing.T, kc *kubernetes.Clientset, jobName, namespace string, iterations, interval int) bool { @@ -744,6 +745,17 @@ func DeletePodsInNamespaceBySelector(t *testing.T, kc *kubernetes.Clientset, sel assert.NoErrorf(t, err, "cannot delete pods - %s", err) } +// Delete all pods in namespace +func DeletePodsInNamespace(t *testing.T, namespace string) { + err := GetKubernetesClient(t).CoreV1().Pods(namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{ + GracePeriodSeconds: ptr.To(int64(0)), + }, metav1.ListOptions{}) + if errors.IsNotFound(err) { + err = nil + } + assert.NoErrorf(t, err, "cannot delete pods - %s", err) +} + // Wait for Pods identified by selector to complete termination func WaitForPodsTerminated(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { diff --git a/tests/internals/cache_metrics/cache_metrics_test.go b/tests/internals/cache_metrics/cache_metrics_test.go index 3f5525c1833..10008711668 100644 --- a/tests/internals/cache_metrics/cache_metrics_test.go +++ b/tests/internals/cache_metrics/cache_metrics_test.go @@ -160,8 +160,8 @@ func testCacheMetricsOnPollingInterval(t *testing.T, kc *kubernetes.Clientset, d // Metric Value = 8, DesiredAverageMetricValue = 2 // should scale in to 8/2 = 4 replicas, irrespective of current replicas - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") // Changing Metric Value to 4, but because we have a long polling interval, the replicas number should remain the same data.MonitoredDeploymentReplicas = 4 @@ -196,8 +196,8 @@ func testDirectQuery(t *testing.T, kc *kubernetes.Clientset, data templateData) // Metric Value = 8, DesiredAverageMetricValue = 2 // should scale in to 8/2 = 4 replicas, irrespective of current replicas - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") // Changing Metric Value to 4, deployment should scale to 2 data.MonitoredDeploymentReplicas = 4 diff --git a/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go b/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go index ae00ea8173d..a65b40c5af1 100644 --- a/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go +++ b/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go @@ -265,21 +265,25 @@ func checkMessage(t *testing.T, count int, client *azservicebus.Client) { if err != nil { assert.NoErrorf(t, err, "cannot create receiver - %s", err) } - defer receiver.Close(context.TODO()) - - messages, err := receiver.ReceiveMessages(context.TODO(), count, nil) - assert.NoErrorf(t, err, "cannot receive messages - %s", err) -
assert.NotEmpty(t, messages) + defer receiver.Close(context.Background()) + // We try to read the messages 3 times with a second of delay + tries := 3 found := false - for _, message := range messages { - event := messaging.CloudEvent{} - err = json.Unmarshal(message.Body, &event) - assert.NoErrorf(t, err, "cannot retrieve message - %s", err) - if expectedSubject == *event.Subject && - expectedSource == event.Source && - expectedType == event.Type { - found = true + for i := 0; i < tries && !found; i++ { + messages, err := receiver.ReceiveMessages(context.Background(), count, nil) + assert.NoErrorf(t, err, "cannot receive messages - %s", err) + assert.NotEmpty(t, messages) + + for _, message := range messages { + event := messaging.CloudEvent{} + err = json.Unmarshal(message.Body, &event) + assert.NoErrorf(t, err, "cannot retrieve message - %s", err) + if expectedSubject == *event.Subject && + expectedSource == event.Source && + expectedType == event.Type { + found = true + } } } diff --git a/tests/internals/idle_replicas/idle_replicas_test.go b/tests/internals/idle_replicas/idle_replicas_test.go index 46a682f9180..db5f25e0dac 100644 --- a/tests/internals/idle_replicas/idle_replicas_test.go +++ b/tests/internals/idle_replicas/idle_replicas_test.go @@ -147,8 +147,8 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- scale to max replicas ---") KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 4, testNamespace) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") } func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { diff --git a/tests/internals/restore_original/restore_original_test.go b/tests/internals/restore_original/restore_original_test.go index 83de6a3b3a1..02a8f711210 100644 --- a/tests/internals/restore_original/restore_original_test.go +++ b/tests/internals/restore_original/restore_original_test.go @@ -138,8 +138,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing scaling ---") KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") } func testRestore(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/internals/scaled_job_validation/scaled_job_validation_test.go b/tests/internals/scaled_job_validation/scaled_job_validation_test.go index 217c3cc8c85..6074a0949f7 100644 --- a/tests/internals/scaled_job_validation/scaled_job_validation_test.go +++ b/tests/internals/scaled_job_validation/scaled_job_validation_test.go @@ -13,7 +13,7 @@ import ( ) const ( - testName = "scaled-object-validation-test" + testName = "scaled-job-validation-test" ) var ( diff --git a/tests/internals/scaled_object_validation/scaled_object_validation_test.go b/tests/internals/scaled_object_validation/scaled_object_validation_test.go index 9cdaff34515..2af7f6b81d8 100644 --- a/tests/internals/scaled_object_validation/scaled_object_validation_test.go +++ b/tests/internals/scaled_object_validation/scaled_object_validation_test.go @@ -131,6 +131,27 @@ spec: 
desiredReplicas: '1' ` + customHpaScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + advanced: + horizontalPodAutoscalerConfig: + name: {{.HpaName}} + triggers: + - type: cron + metadata: + timezone: Etc/UTC + start: 0 * * * * + end: 1 * * * * + desiredReplicas: '1' + ` + hpaTemplate = ` apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -179,6 +200,8 @@ func TestScaledObjectValidations(t *testing.T) { testScaledWorkloadByOtherScaledObject(t, data) + testManagedHpaByOtherScaledObject(t, data) + testScaledWorkloadByOtherHpa(t, data) testScaledWorkloadByOtherHpaWithOwnershipTransfer(t, data) @@ -220,6 +243,25 @@ func testScaledWorkloadByOtherScaledObject(t *testing.T, data templateData) { KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) } +func testManagedHpaByOtherScaledObject(t *testing.T, data templateData) { + t.Log("--- already managed hpa by other scaledobject---") + + data.HpaName = hpaName + + data.ScaledObjectName = scaledObject1Name + err := KubectlApplyWithErrors(t, data, "scaledObjectTemplate", customHpaScaledObjectTemplate) + assert.NoErrorf(t, err, "cannot deploy the scaledObject - %s", err) + + data.ScaledObjectName = scaledObject2Name + data.DeploymentName = fmt.Sprintf("%s-other-deployment", testName) + err = KubectlApplyWithErrors(t, data, "scaledObjectTemplate", customHpaScaledObjectTemplate) + assert.Errorf(t, err, "can deploy the scaledObject - %s", err) + assert.Contains(t, err.Error(), fmt.Sprintf("the HPA '%s' is already managed by the ScaledObject '%s", hpaName, scaledObject1Name)) + + data.ScaledObjectName = scaledObject1Name + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) +} + func testScaledWorkloadByOtherHpa(t *testing.T, data templateData) { t.Log("--- already scaled workload by other hpa---") diff --git a/tests/internals/scaling_strategies/eager_scaling_strategy_test.go b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go index e05c84c30a0..222f960ef16 100644 --- a/tests/internals/scaling_strategies/eager_scaling_strategy_test.go +++ b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go @@ -100,8 +100,11 @@ func TestScalingStrategy(t *testing.T) { }) RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) - CreateKubernetesResources(t, kc, testNamespace, data, templates) + // Publish 0 messages but create the queue + RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 0) + WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1) + CreateKubernetesResources(t, kc, testNamespace, data, templates) testEagerScaling(t, kc) } @@ -121,14 +124,17 @@ func getTemplateData() (templateData, []Template) { func testEagerScaling(t *testing.T, kc *kubernetes.Clientset) { iterationCount := 20 RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4) + WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1) assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 4, iterationCount, 1), "job count should be %d after %d iterations", 4, iterationCount) RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4) + WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1) assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 8, iterationCount, 1), "job count should be %d after %d iterations", 8, iterationCount) - RMQPublishMessages(t, rmqNamespace, connectionString, 
queueName, 4) + RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 8) + WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1) assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 10, iterationCount, 1), "job count should be %d after %d iterations", 10, iterationCount) } diff --git a/tests/internals/status_update/status_update_test.go b/tests/internals/status_update/status_update_test.go new file mode 100644 index 00000000000..31e0d659b96 --- /dev/null +++ b/tests/internals/status_update/status_update_test.go @@ -0,0 +1,277 @@ +//go:build e2e +// +build e2e + +package status_update_test + +import ( + "fmt" + "testing" + + . "github.com/kedacore/keda/v2/tests/helper" +) + +const ( + testName = "status-update-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + metricsServerDeploymentName = fmt.Sprintf("%s-metrics-server", testName) + serviceName = fmt.Sprintf("%s-service", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + scaledJobName = fmt.Sprintf("%s-sj", testName) + secretName = fmt.Sprintf("%s-secret", testName) + metricsServerEndpoint = fmt.Sprintf("http://%s.%s.svc.cluster.local:8080/api/value", serviceName, testNamespace) + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + MetricsServerDeploymentName string + MetricsServerEndpoint string + ServiceName string + ScaledObjectName string + ScaledJobName string + TriggerAuthName string + SecretName string + MetricValue int + MinReplicaCount string + MaxReplicaCount string +} + +const ( + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + AUTH_PASSWORD: U0VDUkVUCg== + AUTH_USERNAME: VVNFUgo= +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: username + name: {{.SecretName}} + key: AUTH_USERNAME + - parameter: password + name: {{.SecretName}} + key: AUTH_PASSWORD +` + + metricsServerdeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.MetricsServerDeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.MetricsServerDeploymentName}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.MetricsServerDeploymentName}} + template: + metadata: + labels: + app: {{.MetricsServerDeploymentName}} + spec: + containers: + - name: metrics + image: ghcr.io/kedacore/tests-metrics-api + ports: + - containerPort: 8080 + envFrom: + - secretRef: + name: {{.SecretName}} + imagePullPolicy: Always +` + + serviceTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: {{.ServiceName}} + namespace: {{.TestNamespace}} +spec: + selector: + app: {{.MetricsServerDeploymentName}} + ports: + - port: 8080 + targetPort: 8080 +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{.DeploymentName}} + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + selector: + matchLabels: + app: {{.DeploymentName}} + replicas: 0 + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: 
{{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + cooldownPeriod: 1 + triggers: + - type: metrics-api + metadata: + targetValue: "5" + activationTargetValue: "20" + url: "{{.MetricsServerEndpoint}}" + valueLocation: 'value' + authMode: "basic" + method: "query" + authenticationRef: + name: {{.TriggerAuthName}} + - type: cron + metadata: + timezone: Asia/Kolkata + start: 0 6 * * * + end: 0 8 * * * + desiredReplicas: "9" + - type: cron + metadata: + timezone: Asia/Kolkata + start: 0 22 * * * + end: 0 23 * * * + desiredReplicas: "9"` + + scaledJobTemplate = ` + apiVersion: keda.sh/v1alpha1 + kind: ScaledJob + metadata: + name: {{.ScaledJobName}} + namespace: {{.TestNamespace}} + spec: + jobTargetRef: + template: + spec: + containers: + - name: external-executor + image: busybox + command: + - sleep + - "30" + imagePullPolicy: IfNotPresent + restartPolicy: Never + backoffLimit: 1 + pollingInterval: 5 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 0 + triggers: + - type: metrics-api + metadata: + targetValue: "5" + activationTargetValue: "20" + url: "{{.MetricsServerEndpoint}}" + valueLocation: 'value' + authMode: "basic" + method: "query" + authenticationRef: + name: {{.TriggerAuthName}} + - type: cron + metadata: + timezone: Asia/Kolkata + start: 0 6 * * * + end: 0 8 * * * + desiredReplicas: "9" + - type: cron + metadata: + timezone: Asia/Kolkata + start: 0 22 * * * + end: 0 23 * * * + desiredReplicas: "9"` +) + +func TestScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // test + testTriggersAndAuthenticationsTypes(t) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) +} + +func testTriggersAndAuthenticationsTypes(t *testing.T) { + otherparameter := `-o jsonpath="{.status.triggersTypes}"` + CheckKubectlGetResult(t, "ScaledObject", scaledObjectName, testNamespace, otherparameter, "metrics-api,cron") + otherparameter = `-o jsonpath="{.status.authenticationsTypes}"` + CheckKubectlGetResult(t, "ScaledObject", scaledObjectName, testNamespace, otherparameter, triggerAuthName) + otherparameter = `-o jsonpath="{.status.triggersTypes}"` + CheckKubectlGetResult(t, "ScaledJob", scaledJobName, testNamespace, otherparameter, "metrics-api,cron") + otherparameter = `-o jsonpath="{.status.authenticationsTypes}"` + CheckKubectlGetResult(t, "ScaledJob", scaledJobName, testNamespace, otherparameter, triggerAuthName) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + MetricsServerDeploymentName: metricsServerDeploymentName, + ServiceName: serviceName, + TriggerAuthName: triggerAuthName, + ScaledObjectName: scaledObjectName, + ScaledJobName: scaledJobName, + SecretName: secretName, + MetricsServerEndpoint: metricsServerEndpoint, + MinReplicaCount: fmt.Sprintf("%v", minReplicaCount), + MaxReplicaCount: fmt.Sprintf("%v", maxReplicaCount), + MetricValue: 0, + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "metricsServerdeploymentTemplate", Config: metricsServerdeploymentTemplate}, + {Name: "serviceTemplate", 
Config: serviceTemplate}, + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + {Name: "scaledJobTemplate", Config: scaledJobTemplate}, + } +} diff --git a/tests/internals/value_metric_type/value_metric_type_test.go b/tests/internals/value_metric_type/value_metric_type_test.go index 06a175f9012..40dd1646dc8 100644 --- a/tests/internals/value_metric_type/value_metric_type_test.go +++ b/tests/internals/value_metric_type/value_metric_type_test.go @@ -149,8 +149,8 @@ func testScaleByAverageValue(t *testing.T, kc *kubernetes.Clientset, data templa // Metric Value = 8, DesiredAverageMetricValue = 2 // should scale in to 8/2 = 4 replicas, irrespective of current replicas - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) } diff --git a/tests/run-all.go b/tests/run-all.go index 76134d55e32..c30cbe27f48 100644 --- a/tests/run-all.go +++ b/tests/run-all.go @@ -25,11 +25,11 @@ import ( ) var ( - concurrentTests = 15 + concurrentTests = 25 regularTestsTimeout = "20m" regularTestsRetries = 3 sequentialTestsTimeout = "20m" - sequentialTestsRetries = 1 + sequentialTestsRetries = 2 ) type TestResult struct { diff --git a/tests/scalers/artemis/artemis_test.go b/tests/scalers/artemis/artemis_test.go index 832c978852f..8bfcd61d64a 100644 --- a/tests/scalers/artemis/artemis_test.go +++ b/tests/scalers/artemis/artemis_test.go @@ -40,6 +40,7 @@ type templateData struct { SecretName string ArtemisPasswordBase64 string ArtemisUserBase64 string + MessageCount int } const ( @@ -87,8 +88,8 @@ spec: spec: containers: - name: kedartemis-consumer - image: balchu/kedartemis-consumer - imagePullPolicy: Always + image: ghcr.io/kedacore/tests-artemis + args: ["consumer"] env: - name: ARTEMIS_PASSWORD valueFrom: @@ -100,10 +101,12 @@ spec: secretKeyRef: name: {{.SecretName}} key: artemis-username - - name: ARTEMIS_HOST + - name: ARTEMIS_SERVER_HOST value: "artemis-activemq.{{.TestNamespace}}" - - name: ARTEMIS_PORT + - name: ARTEMIS_SERVER_PORT value: "61616" + - name: ARTEMIS_MESSAGE_SLEEP_MS + value: "70" ` artemisDeploymentTemplate = `apiVersion: apps/v1 @@ -260,7 +263,7 @@ spec: managementEndpoint: "artemis-activemq.{{.TestNamespace}}:8161" queueName: "test" queueLength: "50" - activationQueueLength: "1500" + activationQueueLength: "5" brokerName: "artemis-activemq" brokerAddress: "test" authenticationRef: @@ -279,7 +282,8 @@ spec: spec: containers: - name: artemis-producer - image: balchu/artemis-producer:0.0.1 + image: ghcr.io/kedacore/tests-artemis + args: ["producer"] env: - name: ARTEMIS_PASSWORD valueFrom: @@ -295,6 +299,8 @@ spec: value: "artemis-activemq.{{.TestNamespace}}" - name: ARTEMIS_SERVER_PORT value: "61616" + - name: ARTEMIS_MESSAGE_COUNT + value: "{{.MessageCount}}" restartPolicy: Never backoffLimit: 4 ` @@ -321,6 +327,7 @@ func TestArtemisScaler(t *testing.T) { func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing activation ---") + data.MessageCount = 1 KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", producerJob) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, 
deploymentName, testNamespace, minReplicaCount, 60) @@ -328,6 +335,7 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing scale out ---") + data.MessageCount = 1000 KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", producerJob) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), @@ -349,6 +357,7 @@ func getTemplateData() (templateData, []Template) { SecretName: secretName, ArtemisPasswordBase64: base64.StdEncoding.EncodeToString([]byte(artemisPassword)), ArtemisUserBase64: base64.StdEncoding.EncodeToString([]byte(artemisUser)), + MessageCount: 0, }, []Template{ {Name: "secretTemplate", Config: secretTemplate}, {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, diff --git a/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go index 13870a40bc4..8eeeae71fae 100644 --- a/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go +++ b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go @@ -26,7 +26,7 @@ import ( var _ = godotenv.Load("../../../.env") const ( - testName = "azure-event-hub-dapr" + testName = "azure-event-hub-dapr-wi" eventhubConsumerGroup = "$Default" ) diff --git a/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go b/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go index a8419ecd44b..06c3c3c2db1 100644 --- a/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go +++ b/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go @@ -202,8 +202,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, client *azservicebus.Clie // check different aggregation operations data.Operation = "max" KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") data.Operation = "avg" KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) diff --git a/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go b/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go index 07bfdc57572..3227f2fdebd 100644 --- a/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go +++ b/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go @@ -225,8 +225,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, client *azservicebus.Clie // check different aggregation operations data.Operation = "max" KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") data.Operation = "avg" KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", 
scaledObjectTemplate) diff --git a/tests/scalers/beanstalkd/beanstalkd_test.go b/tests/scalers/beanstalkd/beanstalkd_test.go new file mode 100644 index 00000000000..7565d3fa0c6 --- /dev/null +++ b/tests/scalers/beanstalkd/beanstalkd_test.go @@ -0,0 +1,267 @@ +//go:build e2e +// +build e2e + +package beanstalkd_test + +import ( + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "beanstalkd-test" + deploymentName = "beanstalkd-consumer-deployment" + beanstalkdPutJobName = "beanstalkd-put-job" + beanstalkdPopJobName = "beanstalkd-pop-job" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + beanstalkdDeploymentName = fmt.Sprintf("%s-beanstalkd-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + beanstalkdTubeName = "default" + activationJobCount = 5 +) + +type templateData struct { + TestNamespace string + BeanstalkdDeploymentName string + BeanstalkdPutJobName string + BeanstalkdPopJobName string + ScaledObjectName string + DeploymentName string + BeanstalkdTubeName string + JobCount int +} + +const ( + beanstalkdDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: beanstalkd + name: {{.BeanstalkdDeploymentName}} + namespace: {{.TestNamespace}} +spec: + selector: + matchLabels: + app: beanstalkd + template: + metadata: + labels: + app: beanstalkd + spec: + containers: + - image: docker.io/schickling/beanstalkd + name: beanstalkd + ports: + - containerPort: 11300 + name: beanstalkd + readinessProbe: + tcpSocket: + port: 11300 + initialDelaySeconds: 5 + periodSeconds: 10 +--- +apiVersion: v1 +kind: Service +metadata: + name: beanstalkd + namespace: {{.TestNamespace}} +spec: + ports: + - name: beanstalkd + port: 11300 + targetPort: 11300 + selector: + app: beanstalkd + type: ClusterIP +` + + scaledObjectActivationTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 3 + pollingInterval: 5 + cooldownPeriod: 10 + triggers: + - type: beanstalkd + metadata: + server: beanstalkd.{{.TestNamespace}}:11300 + value: "15" + activationValue: "10" + tube: {{.BeanstalkdTubeName}} +` + + beanstalkdPutJobsTemplate = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.BeanstalkdPutJobName}} + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - name: beanstalkd-put-job + image: docker.io/sitecrafting/beanstalkd-cli + command: ["/bin/sh"] + args: ["-c", "for run in $(seq 1 {{.JobCount}}); do beanstalkd-cli --host=beanstalkd put \"Test Job\"; done;"] + restartPolicy: OnFailure +` + + beanstalkdPopJobsTemplate = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.BeanstalkdPopJobName}} + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - name: beanstalkd-pop-job + image: docker.io/sitecrafting/beanstalkd-cli + command: ["/bin/sh"] + args: ["-c", "for run in $(seq 1 {{.JobCount}}); do beanstalkd-cli --host=beanstalkd pop; done;"] + restartPolicy: OnFailure +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: nginx-deployment +spec: + replicas: 0 + selector: + 
matchLabels: + app: nginx-deployment + template: + metadata: + labels: + app: nginx-deployment + spec: + containers: + - name: nginx-deployment + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` +) + +func TestBeanstalkdScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, beanstalkdDeploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after a minute") + + // test activation + testActivation(t, kc, data) + + // test scaling out + testScaleOut(t, kc, data) + + // test scaling in + testScaleIn(t, kc, data) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + ScaledObjectName: scaledObjectName, + DeploymentName: deploymentName, + BeanstalkdDeploymentName: beanstalkdDeploymentName, + BeanstalkdTubeName: beanstalkdTubeName, + BeanstalkdPutJobName: beanstalkdPutJobName, + BeanstalkdPopJobName: beanstalkdPopJobName, + JobCount: activationJobCount, + }, []Template{ + {Name: "beanstalkdDeploymentTemplate", Config: beanstalkdDeploymentTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + } +} + +// Adds JobCount beanstalkd jobs to the default tube +func addBeanstalkdJobs(t *testing.T, kc *kubernetes.Clientset, data *templateData) { + // run putJob + KubectlReplaceWithTemplate(t, data, "beanstalkdPutJobsTemplate", beanstalkdPutJobsTemplate) + require.True(t, WaitForJobSuccess(t, kc, beanstalkdPutJobName, testNamespace, 30, 2), "Job should run successfully") +} + +// Removes JobCount beanstalkd jobs from the default tube +func removeBeanstalkdJobs(t *testing.T, kc *kubernetes.Clientset, data *templateData) { + // run popJob + KubectlReplaceWithTemplate(t, data, "beanstalkdPopJobsTemplate", beanstalkdPopJobsTemplate) + require.True(t, WaitForJobSuccess(t, kc, beanstalkdPopJobName, testNamespace, 30, 2), "Job should run successfully") +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation---") + + KubectlApplyWithTemplate(t, data, "scaledObjectActivationTemplate", scaledObjectActivationTemplate) + + // Add 5 beanstalkd jobs + data.JobCount = 5 + addBeanstalkdJobs(t, kc, &data) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scaling out ---") + + // Add 100 beanstalkd jobs + data.JobCount = 100 + addBeanstalkdJobs(t, kc, &data) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 3, 60, 1), + "replica count should be 3 after a minute") +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scaling in ---") + + // Remove 80 beanstalkd jobs + data.JobCount = 80 + removeBeanstalkdJobs(t, kc, &data) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 5), + "replica count should be 2 after 5 minutes") + + // Remove remaining beanstalkd jobs + data.JobCount = 25 + removeBeanstalkdJobs(t, kc, &data) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after a 
minute") +} diff --git a/tests/scalers/elasticsearch/elasticsearch_test.go b/tests/scalers/elasticsearch/elasticsearch_test.go index ea83f315298..68239c27d64 100644 --- a/tests/scalers/elasticsearch/elasticsearch_test.go +++ b/tests/scalers/elasticsearch/elasticsearch_test.go @@ -81,7 +81,7 @@ metadata: labels: app: {{.DeploymentName}} spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: {{.DeploymentName}} @@ -397,7 +397,7 @@ func testElasticsearchScaler(t *testing.T, kc *kubernetes.Clientset) { AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) t.Log("--- testing scale out ---") - addElements(t, 5) + addElements(t, 10) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), "replica count should be %d after 3 minutes", maxReplicaCount) diff --git a/tests/scalers/external_scaler_sj/external_scaler_sj_test.go b/tests/scalers/external_scaler_sj/external_scaler_sj_test.go index fc47c72787d..a5cac292327 100644 --- a/tests/scalers/external_scaler_sj/external_scaler_sj_test.go +++ b/tests/scalers/external_scaler_sj/external_scaler_sj_test.go @@ -9,6 +9,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -139,6 +140,9 @@ func TestScaler(t *testing.T) { CreateKubernetesResources(t, kc, testNamespace, data, templates) + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, scalerName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + assert.True(t, WaitForJobCount(t, kc, testNamespace, 0, 60, 1), "job count should be 0 after 1 minute") @@ -184,7 +188,7 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { data.MetricValue = 0 KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) - assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 0, 60, 1), - "job count should be 0 after 1 minute") + assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 0, 120, 1), + "job count should be 0 after 2 minute") KubectlDeleteWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) } diff --git a/tests/scalers/github_runner/github_runner_test.go b/tests/scalers/github_runner/github_runner_test.go index 97235dfb978..8c64f44be44 100644 --- a/tests/scalers/github_runner/github_runner_test.go +++ b/tests/scalers/github_runner/github_runner_test.go @@ -313,13 +313,11 @@ func TestScaler(t *testing.T) { // test scaling Scaled Job with App KubectlApplyWithTemplate(t, data, "scaledGhaJobTemplate", scaledGhaJobTemplate) - // testActivation(t, kc, client) testJobScaleOut(t, kc, client, ghaWorkflowID) testJobScaleIn(t, kc) // test scaling Scaled Job KubectlApplyWithTemplate(t, data, "scaledJobTemplate", scaledJobTemplate) - // testActivation(t, kc, client) testJobScaleOut(t, kc, client, workflowID) testJobScaleIn(t, kc) diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go index 3bc8efa179e..a545ae3be28 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go @@ -111,8 +111,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") 
RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go index 47acb3e7144..b73a7ff63d9 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go @@ -20,7 +20,7 @@ import ( var _ = godotenv.Load("../../../.env") const ( - testName = "rmq-queue-amqp-test" + testName = "rmq-queue-amqp-auth-test" ) var ( diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go index c1d957e103e..915eefa1ff0 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go @@ -111,8 +111,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go index ff1f930e7b0..b27e006c106 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go @@ -110,8 +110,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go index bd49396717a..cad93024009 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go @@ 
-162,8 +162,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go index a95d1924a8e..ed69492645b 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go @@ -20,7 +20,7 @@ import ( var _ = godotenv.Load("../../../.env") const ( - testName = "rmq-queue-http-test" + testName = "rmq-queue-http-test-auth" ) var ( diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go index 9dbefd50480..0720b0fe074 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go @@ -110,8 +110,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minutes") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/selenium/selenium_test.go b/tests/scalers/selenium/selenium_test.go index 8d4b43cef19..e540b1f74c8 100644 --- a/tests/scalers/selenium/selenium_test.go +++ b/tests/scalers/selenium/selenium_test.go @@ -24,33 +24,47 @@ const ( ) var ( - testNamespace = fmt.Sprintf("%s-ns", testName) - chromeDeploymentName = fmt.Sprintf("%s-chrome", testName) - firefoxDeploymentName = fmt.Sprintf("%s-firefox", testName) - edgeDeploymentName = fmt.Sprintf("%s-edge", testName) - hubDeploymentName = fmt.Sprintf("%s-hub", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) - hubHost = fmt.Sprintf("selenium-hub.%s", testNamespace) - hubPort = 4444 - hubGraphURL = fmt.Sprintf("http://%s:%d/graphql", hubHost, hubPort) - minReplicaCount = 0 - maxReplicaCount = 1 + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + triggerAuthName = fmt.Sprintf("%s-trigger-auth", testName) + chromeDeploymentName = fmt.Sprintf("%s-chrome", testName) + firefoxDeploymentName = fmt.Sprintf("%s-firefox", testName) + edgeDeploymentName = fmt.Sprintf("%s-edge", testName) + hubDeploymentName = fmt.Sprintf("%s-hub", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + hubHost = 
fmt.Sprintf("%s:%s@selenium-hub.%s", hubBasicAuthUsername, hubBasicAuthPassword, testNamespace) + hubPort = 4444 + hubGraphURL = fmt.Sprintf("http://selenium-hub.%s:%d/graphql", testNamespace, hubPort) + hubBasicAuthUsername = "admin" + hubBasicAuthPassword = "admin" + hubBasicAuthUsernameB64enc = "YWRtaW4=" + hubBasicAuthPasswordB64enc = "YWRtaW4=" + hubBasicAuthHeader = "YWRtaW46YWRtaW4=" + minReplicaCount = 0 + maxReplicaCount = 1 ) type templateData struct { - TestNamespace string - ChromeDeploymentName string - FirefoxDeploymentName string - EdgeDeploymentName string - HubDeploymentName string - HubHost string - HubPort int - HubGraphURL string - WithVersion bool - JobName string - ScaledObjectName string - MinReplicaCount int - MaxReplicaCount int + TestNamespace string + SecretName string + TriggerAuthName string + ChromeDeploymentName string + FirefoxDeploymentName string + EdgeDeploymentName string + HubDeploymentName string + HubHost string + HubPort int + HubGraphURL string + HubBasicAuthUsername string + HubBasicAuthPassword string + HubBasicAuthUsernameB64enc string + HubBasicAuthPasswordB64enc string + HubBasicAuthHeader string + WithVersion bool + JobName string + ScaledObjectName string + MinReplicaCount int + MaxReplicaCount int } const ( @@ -63,9 +77,9 @@ metadata: labels: app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest data: SE_EVENT_BUS_HOST: selenium-hub SE_EVENT_BUS_PUBLISH_PORT: "4442" @@ -82,9 +96,9 @@ metadata: name: selenium-chrome-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: type: ClusterIP selector: @@ -107,9 +121,9 @@ metadata: app.kubernetes.io/name: selenium-chrome-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: replicas: 0 selector: @@ -123,7 +137,7 @@ spec: spec: containers: - name: selenium-chrome-node - image: selenium/node-chrome:4.0.0-rc-1-prerelease-20210618 + image: selenium/node-chrome:latest imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -141,6 +155,34 @@ spec: sizeLimit: 1Gi ` + secretTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + username: '{{.HubBasicAuthUsernameB64enc}}' + password: '{{.HubBasicAuthPasswordB64enc}}' +` + + scaledTriggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: username + name: {{.SecretName}} + key: username + - parameter: password + name: {{.SecretName}} + key: password +` + chromeScaledObjectTemplate = ` apiVersion: keda.sh/v1alpha1 kind: ScaledObject @@ -159,6 +201,8 @@ 
spec: url: '{{.HubGraphURL}}' browserName: 'chrome' activationThreshold: '1' + authenticationRef: + name: '{{.TriggerAuthName}}' ` firefoxNodeServiceTemplate = ` @@ -171,9 +215,9 @@ metadata: name: selenium-firefox-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: type: ClusterIP selector: @@ -195,9 +239,9 @@ metadata: app.kubernetes.io/name: selenium-firefox-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: replicas: 0 selector: @@ -211,7 +255,7 @@ spec: spec: containers: - name: selenium-firefox-node - image: selenium/node-firefox:4.0.0-rc-1-prerelease-20210618 + image: selenium/node-firefox:latest imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -247,6 +291,8 @@ spec: url: '{{.HubGraphURL}}' browserName: 'firefox' activationThreshold: '1' + authenticationRef: + name: '{{.TriggerAuthName}}' ` edgeNodeServiceTemplate = ` @@ -259,9 +305,9 @@ metadata: name: selenium-edge-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: type: ClusterIP selector: @@ -284,9 +330,9 @@ metadata: app.kubernetes.io/name: selenium-edge-node app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: replicas: 0 selector: @@ -300,7 +346,7 @@ spec: spec: containers: - name: selenium-edge-node - image: selenium/node-edge:4.0.0-rc-1-prerelease-20210618 + image: selenium/node-edge:latest imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -337,6 +383,8 @@ spec: browserName: 'MicrosoftEdge' sessionBrowserName: 'msedge' activationThreshold: '1' + authenticationRef: + name: '{{.TriggerAuthName}}' ` hubServiceTemplate = ` @@ -349,9 +397,9 @@ metadata: app: selenium-hub app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + app.kubernetes.io/component: latest + helm.sh/chart: latest spec: selector: app: selenium-hub @@ -382,9 +430,9 @@ metadata: app.kubernetes.io/name: selenium-hub app.kubernetes.io/managed-by: helm app.kubernetes.io/instance: selenium-hpa - app.kubernetes.io/version: 4.0.0-beta-1-prerelease-20210114 - app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 - helm.sh/chart: selenium-grid-0.2.0 + app.kubernetes.io/version: latest + 
app.kubernetes.io/component: latest + helm.sh/chart: latest spec: replicas: 1 selector: @@ -396,8 +444,13 @@ spec: spec: containers: - name: selenium-hub - image: selenium/hub:4.0.0-rc-1-prerelease-20210618 + image: selenium/hub:latest imagePullPolicy: IfNotPresent + env: + - name: SE_ROUTER_USERNAME + value: '{{.HubBasicAuthUsername}}' + - name: SE_ROUTER_PASSWORD + value: '{{.HubBasicAuthPassword}}' ports: - containerPort: 4444 protocol: TCP @@ -409,6 +462,9 @@ spec: httpGet: path: /wd/hub/status port: 4444 + httpHeaders: + - name: Authorization + value: Basic {{.HubBasicAuthHeader}} initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 10 @@ -418,6 +474,9 @@ spec: httpGet: path: /wd/hub/status port: 4444 + httpHeaders: + - name: Authorization + value: Basic {{.HubBasicAuthHeader}} initialDelaySeconds: 12 periodSeconds: 10 timeoutSeconds: 10 @@ -519,18 +578,27 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { func getTemplateData() (templateData, []Template) { return templateData{ - TestNamespace: testNamespace, - ChromeDeploymentName: chromeDeploymentName, - FirefoxDeploymentName: firefoxDeploymentName, - EdgeDeploymentName: edgeDeploymentName, - HubDeploymentName: hubDeploymentName, - HubHost: hubHost, - HubPort: hubPort, - HubGraphURL: hubGraphURL, - ScaledObjectName: scaledObjectName, - MinReplicaCount: minReplicaCount, - MaxReplicaCount: maxReplicaCount, + TestNamespace: testNamespace, + SecretName: secretName, + TriggerAuthName: triggerAuthName, + ChromeDeploymentName: chromeDeploymentName, + FirefoxDeploymentName: firefoxDeploymentName, + EdgeDeploymentName: edgeDeploymentName, + HubDeploymentName: hubDeploymentName, + HubHost: hubHost, + HubPort: hubPort, + HubGraphURL: hubGraphURL, + HubBasicAuthUsername: hubBasicAuthUsername, + HubBasicAuthPassword: hubBasicAuthPassword, + HubBasicAuthUsernameB64enc: hubBasicAuthUsernameB64enc, + HubBasicAuthPasswordB64enc: hubBasicAuthPasswordB64enc, + HubBasicAuthHeader: hubBasicAuthHeader, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "scaledTriggerAuthTemplate", Config: scaledTriggerAuthTemplate}, {Name: "eventBusConfigTemplate", Config: eventBusConfigTemplate}, {Name: "hubDeploymentTemplate", Config: hubDeploymentTemplate}, {Name: "hubServiceTemplate", Config: hubServiceTemplate}, diff --git a/tests/scalers/solace/solace_test.go b/tests/scalers/solace/solace_test.go index 13ead35fa2e..86319017ae3 100644 --- a/tests/scalers/solace/solace_test.go +++ b/tests/scalers/solace/solace_test.go @@ -207,16 +207,18 @@ spec: func TestStanScaler(t *testing.T) { kc := GetKubernetesClient(t) data, templates := getTemplateData() + + // Create kubernetes resources + CreateKubernetesResources(t, kc, testNamespace, data, templates) + installSolace(t) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + t.Cleanup(func() { KubectlDeleteWithTemplate(t, data, "scaledObjectTemplateRate", scaledObjectTemplateRate) uninstallSolace(t) DeleteKubernetesResources(t, testNamespace, data, templates) }) - // Create kubernetes resources - CreateKubernetesResources(t, kc, testNamespace, data, templates) - installSolace(t) - KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), "replica count should be 0 after 1 minute") @@ -236,11 +238,9 @@ func 
installSolace(t *testing.T) { require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") require.NoErrorf(t, err, "cannot execute command - %s", err) - _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set solace.usernameAdminPassword=KedaLabAdminPwd1 --set storage.persistent=false,solace.size=dev,nameOverride=pubsubplus-dev,service.type=ClusterIP --namespace %s kedalab solacecharts/pubsubplus`, + _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set solace.usernameAdminPassword=KedaLabAdminPwd1 --set storage.persistent=false,solace.size=dev,nameOverride=pubsubplus-dev,service.type=ClusterIP --wait --namespace %s kedalab solacecharts/pubsubplus`, testNamespace)) require.NoErrorf(t, err, "cannot execute command - %s", err) - _, err = ExecuteCommand("sleep 60") // there is a bug in the solace helm chart where it is looking for the wrong number of replicas on --wait - require.NoErrorf(t, err, "cannot execute command - %s", err) // Create the pubsub broker _, _, err = ExecCommandOnSpecificPod(t, helperName, testNamespace, "./config/config_solace.sh") require.NoErrorf(t, err, "cannot execute command - %s", err) diff --git a/tests/scalers/datadog/datadog_dca/datadog_dca_test.go b/tests/sequential/datadog_dca/datadog_dca_test.go similarity index 97% rename from tests/scalers/datadog/datadog_dca/datadog_dca_test.go rename to tests/sequential/datadog_dca/datadog_dca_test.go index 66512515484..488b6ebc7b4 100644 --- a/tests/scalers/datadog/datadog_dca/datadog_dca_test.go +++ b/tests/sequential/datadog_dca/datadog_dca_test.go @@ -1,6 +1,10 @@ //go:build e2e // +build e2e +// Temporarily moved to standalone e2e, as I found that the DD Agent autogenerates DatadogMetric from other +// unrelated HPAs. Until we get a response about how to disable this, the best solution is to run this test +// standalone. We should move it back once we solve this problem. + package datadog_dca_test import ( diff --git a/tests/utils/helper/helper.go b/tests/utils/helper/helper.go index 039e5546b8c..3c618a8e4a6 100644 --- a/tests/utils/helper/helper.go +++ b/tests/utils/helper/helper.go @@ -51,8 +51,7 @@ image: repository: "otel/opentelemetry-collector-contrib" config: exporters: - logging: - loglevel: debug + debug: {} prometheus: endpoint: 0.0.0.0:8889 receivers: @@ -72,7 +71,7 @@ config: receivers: - otlp exporters: - - logging + - debug - prometheus logs: null ` diff --git a/vendor/github.com/beanstalkd/go-beanstalk/License b/vendor/github.com/beanstalkd/go-beanstalk/License new file mode 100644 index 00000000000..183c3898c36 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/License @@ -0,0 +1,22 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beanstalkd/go-beanstalk/Readme.md b/vendor/github.com/beanstalkd/go-beanstalk/Readme.md new file mode 100644 index 00000000000..15fd3b96330 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/Readme.md @@ -0,0 +1,19 @@ +# Beanstalk + +Go client for [beanstalkd](https://beanstalkd.github.io). + +## Install + + $ go get github.com/beanstalkd/go-beanstalk + +## Use + +Produce jobs: + + c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") + id, err := c.Put([]byte("hello"), 1, 0, 120*time.Second) + +Consume jobs: + + c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") + id, body, err := c.Reserve(5 * time.Second) diff --git a/vendor/github.com/beanstalkd/go-beanstalk/conn.go b/vendor/github.com/beanstalkd/go-beanstalk/conn.go new file mode 100644 index 00000000000..7c3eb40d83c --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/conn.go @@ -0,0 +1,295 @@ +package beanstalk + +import ( + "fmt" + "io" + "net" + "net/textproto" + "strings" + "time" +) + +// DefaultDialTimeout is the time to wait for a connection to the beanstalk server. +const DefaultDialTimeout = 10 * time.Second + +// DefaultKeepAlivePeriod is the default period between TCP keepalive messages. +const DefaultKeepAlivePeriod = 10 * time.Second + +// A Conn represents a connection to a beanstalkd server. It consists +// of a default Tube and TubeSet as well as the underlying network +// connection. The embedded types carry methods with them; see the +// documentation of those types for details. +type Conn struct { + c *textproto.Conn + used string + watched map[string]bool + Tube + TubeSet +} + +var ( + space = []byte{' '} + crnl = []byte{'\r', '\n'} + yamlHead = []byte{'-', '-', '-', '\n'} + nl = []byte{'\n'} + colonSpace = []byte{':', ' '} + minusSpace = []byte{'-', ' '} +) + +// NewConn returns a new Conn using conn for I/O. +func NewConn(conn io.ReadWriteCloser) *Conn { + c := new(Conn) + c.c = textproto.NewConn(conn) + c.Tube = *NewTube(c, "default") + c.TubeSet = *NewTubeSet(c, "default") + c.used = "default" + c.watched = map[string]bool{"default": true} + return c +} + +// Dial connects addr on the given network using net.DialTimeout +// with a default timeout of 10s and then returns a new Conn for the connection. +func Dial(network, addr string) (*Conn, error) { + return DialTimeout(network, addr, DefaultDialTimeout) +} + +// DialTimeout connects addr on the given network using net.DialTimeout +// with a supplied timeout and then returns a new Conn for the connection. +func DialTimeout(network, addr string, timeout time.Duration) (*Conn, error) { + dialer := &net.Dialer{ + Timeout: timeout, + KeepAlive: DefaultKeepAlivePeriod, + } + c, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + return NewConn(c), nil +} + +// Close closes the underlying network connection. 
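+//
+// Callers typically pair a successful Dial with a deferred Close, for
+// example (illustrative address; error handling elided):
+//
+//	c, err := beanstalk.Dial("tcp", "127.0.0.1:11300")
+//	if err != nil {
+//		return err
+//	}
+//	defer c.Close()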
+func (c *Conn) Close() error { + return c.c.Close() +} + +func (c *Conn) cmd(t *Tube, ts *TubeSet, body []byte, op string, args ...interface{}) (req, error) { + // negative dur checking + for _, arg := range args { + if d, _ := arg.(dur); d < 0 { + return req{}, fmt.Errorf("duration must be non-negative, got %v", time.Duration(d)) + } + } + + r := req{c.c.Next(), op} + c.c.StartRequest(r.id) + defer c.c.EndRequest(r.id) + err := c.adjustTubes(t, ts) + if err != nil { + return req{}, err + } + if body != nil { + args = append(args, len(body)) + } + c.printLine(op, args...) + if body != nil { + c.c.W.Write(body) + c.c.W.Write(crnl) + } + err = c.c.W.Flush() + if err != nil { + return req{}, ConnError{c, op, err} + } + return r, nil +} + +func (c *Conn) adjustTubes(t *Tube, ts *TubeSet) error { + if t != nil && t.Name != c.used { + if err := checkName(t.Name); err != nil { + return err + } + c.printLine("use", t.Name) + c.used = t.Name + } + if ts != nil { + for s := range ts.Name { + if !c.watched[s] { + if err := checkName(s); err != nil { + return err + } + c.printLine("watch", s) + } + } + for s := range c.watched { + if !ts.Name[s] { + c.printLine("ignore", s) + } + } + c.watched = make(map[string]bool) + for s := range ts.Name { + c.watched[s] = true + } + } + return nil +} + +// does not flush +func (c *Conn) printLine(cmd string, args ...interface{}) { + io.WriteString(c.c.W, cmd) + for _, a := range args { + c.c.W.Write(space) + fmt.Fprint(c.c.W, a) + } + c.c.W.Write(crnl) +} + +func (c *Conn) readResp(r req, readBody bool, f string, a ...interface{}) (body []byte, err error) { + c.c.StartResponse(r.id) + defer c.c.EndResponse(r.id) + line, err := c.c.ReadLine() + for strings.HasPrefix(line, "WATCHING ") || strings.HasPrefix(line, "USING ") { + line, err = c.c.ReadLine() + } + if err != nil { + return nil, ConnError{c, r.op, err} + } + toScan := line + if readBody { + var size int + toScan, size, err = parseSize(toScan) + if err != nil { + return nil, ConnError{c, r.op, err} + } + body = make([]byte, size+2) // include trailing CR NL + _, err = io.ReadFull(c.c.R, body) + if err != nil { + return nil, ConnError{c, r.op, err} + } + body = body[:size] // exclude trailing CR NL + } + + err = scan(toScan, f, a...) + if err != nil { + return nil, ConnError{c, r.op, err} + } + return body, nil +} + +// Delete deletes the given job. +func (c *Conn) Delete(id uint64) error { + r, err := c.cmd(nil, nil, nil, "delete", id) + if err != nil { + return err + } + _, err = c.readResp(r, false, "DELETED") + return err +} + +// Release tells the server to perform the following actions: +// set the priority of the given job to pri, remove it from the list of +// jobs reserved by c, wait delay seconds, then place the job in the +// ready queue, which makes it available for reservation by any client. +func (c *Conn) Release(id uint64, pri uint32, delay time.Duration) error { + r, err := c.cmd(nil, nil, nil, "release", id, pri, dur(delay)) + if err != nil { + return err + } + _, err = c.readResp(r, false, "RELEASED") + return err +} + +// Bury places the given job in a holding area in the job's tube and +// sets its priority to pri. The job will not be scheduled again until it +// has been kicked; see also the documentation of Kick. 
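+//
+// For example (illustrative priority value), a worker that cannot process
+// a job might bury it for later inspection and eventually return it to the
+// ready queue with KickJob:
+//
+//	_ = c.Bury(id, 1024)
+//	// ... investigate the failure, then:
+//	_ = c.KickJob(id)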
+func (c *Conn) Bury(id uint64, pri uint32) error { + r, err := c.cmd(nil, nil, nil, "bury", id, pri) + if err != nil { + return err + } + _, err = c.readResp(r, false, "BURIED") + return err +} + +// KickJob places the given job to the ready queue of the same tube where it currently belongs +// when the given job id exists and is in a buried or delayed state. +func (c *Conn) KickJob(id uint64) error { + r, err := c.cmd(nil, nil, nil, "kick-job", id) + if err != nil { + return err + } + _, err = c.readResp(r, false, "KICKED") + return err +} + +// Touch resets the reservation timer for the given job. +// It is an error if the job isn't currently reserved by c. +// See the documentation of Reserve for more details. +func (c *Conn) Touch(id uint64) error { + r, err := c.cmd(nil, nil, nil, "touch", id) + if err != nil { + return err + } + _, err = c.readResp(r, false, "TOUCHED") + return err +} + +// Peek gets a copy of the specified job from the server. +func (c *Conn) Peek(id uint64) (body []byte, err error) { + r, err := c.cmd(nil, nil, nil, "peek", id) + if err != nil { + return nil, err + } + return c.readResp(r, true, "FOUND %d", &id) +} + +// ReserveJob reserves the specified job by id from the server. +func (c *Conn) ReserveJob(id uint64) (body []byte, err error) { + r, err := c.cmd(nil, nil, nil, "reserve-job", id) + if err != nil { + return nil, err + } + return c.readResp(r, true, "RESERVED %d", &id) +} + +// Stats retrieves global statistics from the server. +func (c *Conn) Stats() (map[string]string, error) { + r, err := c.cmd(nil, nil, nil, "stats") + if err != nil { + return nil, err + } + body, err := c.readResp(r, true, "OK") + return parseDict(body), err +} + +// StatsJob retrieves statistics about the given job. +func (c *Conn) StatsJob(id uint64) (map[string]string, error) { + r, err := c.cmd(nil, nil, nil, "stats-job", id) + if err != nil { + return nil, err + } + body, err := c.readResp(r, true, "OK") + return parseDict(body), err +} + +// ListTubes returns the names of the tubes that currently +// exist on the server. +func (c *Conn) ListTubes() ([]string, error) { + r, err := c.cmd(nil, nil, nil, "list-tubes") + if err != nil { + return nil, err + } + body, err := c.readResp(r, true, "OK") + return parseList(body), err +} + +func scan(input, format string, a ...interface{}) error { + _, err := fmt.Sscanf(input, format, a...) + if err != nil { + return findRespError(input) + } + return nil +} + +type req struct { + id uint + op string +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/doc.go b/vendor/github.com/beanstalkd/go-beanstalk/doc.go new file mode 100644 index 00000000000..7bb685e008a --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/doc.go @@ -0,0 +1,6 @@ +// Package beanstalk provides a client for the beanstalk protocol. +// See http://kr.github.com/beanstalkd/ for the server. +// +// This package is synchronized internally and safe to use from +// multiple goroutines without other coordination. +package beanstalk diff --git a/vendor/github.com/beanstalkd/go-beanstalk/err.go b/vendor/github.com/beanstalkd/go-beanstalk/err.go new file mode 100644 index 00000000000..66a38512a1b --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/err.go @@ -0,0 +1,63 @@ +package beanstalk + +import "errors" + +// ConnError records an error message from the server and the operation +// and connection that caused it. 
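+//
+// Because ConnError implements Unwrap, callers can match the underlying
+// protocol error with errors.Is, e.g.:
+//
+//	if errors.Is(err, beanstalk.ErrTimeout) {
+//		// no job became ready before the reserve timeout expired
+//	}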
+type ConnError struct { + Conn *Conn + Op string + Err error +} + +func (e ConnError) Error() string { + return e.Op + ": " + e.Err.Error() +} + +func (e ConnError) Unwrap() error { + return e.Err +} + +// Error messages returned by the server. +var ( + ErrBadFormat = errors.New("bad command format") + ErrBuried = errors.New("buried") + ErrDeadline = errors.New("deadline soon") + ErrDraining = errors.New("draining") + ErrInternal = errors.New("internal error") + ErrJobTooBig = errors.New("job too big") + ErrNoCRLF = errors.New("expected CR LF") + ErrNotFound = errors.New("not found") + ErrNotIgnored = errors.New("not ignored") + ErrOOM = errors.New("server is out of memory") + ErrTimeout = errors.New("timeout") + ErrUnknown = errors.New("unknown command") +) + +var respError = map[string]error{ + "BAD_FORMAT": ErrBadFormat, + "BURIED": ErrBuried, + "DEADLINE_SOON": ErrDeadline, + "DRAINING": ErrDraining, + "EXPECTED_CRLF": ErrNoCRLF, + "INTERNAL_ERROR": ErrInternal, + "JOB_TOO_BIG": ErrJobTooBig, + "NOT_FOUND": ErrNotFound, + "NOT_IGNORED": ErrNotIgnored, + "OUT_OF_MEMORY": ErrOOM, + "TIMED_OUT": ErrTimeout, + "UNKNOWN_COMMAND": ErrUnknown, +} + +type unknownRespError string + +func (e unknownRespError) Error() string { + return "unknown response: " + string(e) +} + +func findRespError(s string) error { + if err := respError[s]; err != nil { + return err + } + return unknownRespError(s) +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/name.go b/vendor/github.com/beanstalkd/go-beanstalk/name.go new file mode 100644 index 00000000000..5a85b2d41c6 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/name.go @@ -0,0 +1,55 @@ +package beanstalk + +import ( + "errors" +) + +// NameChars are the allowed name characters in the beanstalkd protocol. +const NameChars = `\-+/;.$_()0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz` + +// NameError indicates that a name was malformed and the specific error +// describing how. +type NameError struct { + Name string + Err error +} + +func (e NameError) Error() string { + return e.Err.Error() + ": " + e.Name +} + +func (e NameError) Unwrap() error { + return e.Err +} + +// Name format errors. The Err field of NameError contains one of these. 
+var ( + ErrEmpty = errors.New("name is empty") + ErrBadChar = errors.New("name has bad char") // contains a character not in NameChars + ErrTooLong = errors.New("name is too long") +) + +func checkName(s string) error { + switch { + case len(s) == 0: + return NameError{s, ErrEmpty} + case len(s) >= 200: + return NameError{s, ErrTooLong} + case !containsOnly(s, NameChars): + return NameError{s, ErrBadChar} + } + return nil +} + +func containsOnly(s, chars string) bool { +outer: + for _, c := range s { + for _, m := range chars { + if c == m { + continue outer + } + } + return false + } + return true +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/parse.go b/vendor/github.com/beanstalkd/go-beanstalk/parse.go new file mode 100644 index 00000000000..091ab86e8f3 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/parse.go @@ -0,0 +1,54 @@ +package beanstalk + +import ( + "bytes" + "strconv" + "strings" +) + +func parseDict(dat []byte) map[string]string { + if dat == nil { + return nil + } + d := make(map[string]string) + if bytes.HasPrefix(dat, yamlHead) { + dat = dat[4:] + } + for _, s := range bytes.Split(dat, nl) { + kv := bytes.SplitN(s, colonSpace, 2) + if len(kv) != 2 { + continue + } + d[string(kv[0])] = string(kv[1]) + } + return d +} + +func parseList(dat []byte) []string { + if dat == nil { + return nil + } + l := []string{} + if bytes.HasPrefix(dat, yamlHead) { + dat = dat[4:] + } + for _, s := range bytes.Split(dat, nl) { + if !bytes.HasPrefix(s, minusSpace) { + continue + } + l = append(l, string(s[2:])) + } + return l +} + +func parseSize(s string) (string, int, error) { + i := strings.LastIndex(s, " ") + if i == -1 { + return "", 0, findRespError(s) + } + n, err := strconv.Atoi(s[i+1:]) + if err != nil { + return "", 0, err + } + return s[:i], n, nil +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/time.go b/vendor/github.com/beanstalkd/go-beanstalk/time.go new file mode 100644 index 00000000000..fd128cbd849 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/time.go @@ -0,0 +1,12 @@ +package beanstalk + +import ( + "strconv" + "time" +) + +type dur time.Duration + +func (d dur) String() string { + return strconv.FormatInt(int64(time.Duration(d)/time.Second), 10) +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/tube.go b/vendor/github.com/beanstalkd/go-beanstalk/tube.go new file mode 100644 index 00000000000..fe7baf7e536 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/tube.go @@ -0,0 +1,112 @@ +package beanstalk + +import ( + "time" +) + +// Tube represents tube Name on the server connected to by Conn. +// It has methods for commands that operate on a single tube. +type Tube struct { + Conn *Conn + Name string +} + +// NewTube returns a new Tube representing the given name. +func NewTube(c *Conn, name string) *Tube { + return &Tube{c, name} +} + +// Put puts a job into tube t with priority pri and TTR ttr, and returns +// the id of the newly-created job. If delay is nonzero, the server will +// wait the given amount of time after returning to the client and before +// putting the job into the ready queue. +func (t *Tube) Put(body []byte, pri uint32, delay, ttr time.Duration) (id uint64, err error) { + r, err := t.Conn.cmd(t, nil, body, "put", pri, dur(delay), dur(ttr)) + if err != nil { + return 0, err + } + _, err = t.Conn.readResp(r, false, "INSERTED %d", &id) + if err != nil { + return 0, err + } + return id, nil +} + +// PeekReady gets a copy of the job at the front of t's ready queue. 
+func (t *Tube) PeekReady() (id uint64, body []byte, err error) { + r, err := t.Conn.cmd(t, nil, nil, "peek-ready") + if err != nil { + return 0, nil, err + } + body, err = t.Conn.readResp(r, true, "FOUND %d", &id) + if err != nil { + return 0, nil, err + } + return id, body, nil +} + +// PeekDelayed gets a copy of the delayed job that is next to be +// put in t's ready queue. +func (t *Tube) PeekDelayed() (id uint64, body []byte, err error) { + r, err := t.Conn.cmd(t, nil, nil, "peek-delayed") + if err != nil { + return 0, nil, err + } + body, err = t.Conn.readResp(r, true, "FOUND %d", &id) + if err != nil { + return 0, nil, err + } + return id, body, nil +} + +// PeekBuried gets a copy of the job in the holding area that would +// be kicked next by Kick. +func (t *Tube) PeekBuried() (id uint64, body []byte, err error) { + r, err := t.Conn.cmd(t, nil, nil, "peek-buried") + if err != nil { + return 0, nil, err + } + body, err = t.Conn.readResp(r, true, "FOUND %d", &id) + if err != nil { + return 0, nil, err + } + return id, body, nil +} + +// Kick takes up to bound jobs from the holding area and moves them into +// the ready queue, then returns the number of jobs moved. Jobs will be +// taken in the order in which they were last buried. +func (t *Tube) Kick(bound int) (n int, err error) { + r, err := t.Conn.cmd(t, nil, nil, "kick", bound) + if err != nil { + return 0, err + } + _, err = t.Conn.readResp(r, false, "KICKED %d", &n) + if err != nil { + return 0, err + } + return n, nil +} + +// Stats retrieves statistics about tube t. +func (t *Tube) Stats() (map[string]string, error) { + r, err := t.Conn.cmd(nil, nil, nil, "stats-tube", t.Name) + if err != nil { + return nil, err + } + body, err := t.Conn.readResp(r, true, "OK") + return parseDict(body), err +} + +// Pause pauses new reservations in t for time d. +func (t *Tube) Pause(d time.Duration) error { + r, err := t.Conn.cmd(nil, nil, nil, "pause-tube", t.Name, dur(d)) + if err != nil { + return err + } + _, err = t.Conn.readResp(r, false, "PAUSED") + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/beanstalkd/go-beanstalk/tubeset.go b/vendor/github.com/beanstalkd/go-beanstalk/tubeset.go new file mode 100644 index 00000000000..0b431e011c4 --- /dev/null +++ b/vendor/github.com/beanstalkd/go-beanstalk/tubeset.go @@ -0,0 +1,39 @@ +package beanstalk + +import ( + "time" +) + +// TubeSet represents a set of tubes on the server connected to by Conn. +// Name names the tubes represented. +type TubeSet struct { + Conn *Conn + Name map[string]bool +} + +// NewTubeSet returns a new TubeSet representing the given names. +func NewTubeSet(c *Conn, name ...string) *TubeSet { + ts := &TubeSet{c, make(map[string]bool)} + for _, s := range name { + ts.Name[s] = true + } + return ts +} + +// Reserve reserves and returns a job from one of the tubes in t. If no +// job is available before time timeout has passed, Reserve returns a +// ConnError recording ErrTimeout. +// +// Typically, a client will reserve a job, perform some work, then delete +// the job with Conn.Delete. 
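+//
+// A minimal sketch of that pattern (hypothetical handleJob helper; error
+// handling elided):
+//
+//	id, body, err := ts.Reserve(5 * time.Second)
+//	if err != nil {
+//		return err
+//	}
+//	handleJob(body) // hypothetical worker function
+//	return ts.Conn.Delete(id)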
+func (t *TubeSet) Reserve(timeout time.Duration) (id uint64, body []byte, err error) { + r, err := t.Conn.cmd(nil, t, nil, "reserve-with-timeout", dur(timeout)) + if err != nil { + return 0, nil, err + } + body, err = t.Conn.readResp(r, true, "RESERVED %d", &id) + if err != nil { + return 0, nil, err + } + return id, body, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index af45eca4758..07f8e3baa61 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -456,6 +456,9 @@ github.com/aws/smithy-go/time github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io github.com/aws/smithy-go/waiter +# github.com/beanstalkd/go-beanstalk v0.2.0 +## explicit; go 1.14 +github.com/beanstalkd/go-beanstalk # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile
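A minimal end-to-end sketch of the vendored go-beanstalk client added above (broker address, tube name, and priority are illustrative; error handling elided):

    c, _ := beanstalk.Dial("tcp", "127.0.0.1:11300")
    defer c.Close()
    tube := beanstalk.NewTube(c, "example-tube")
    id, _ := tube.Put([]byte("hello"), 1024, 0, 120*time.Second) // pri 1024, no delay, 120s TTR
    stats, _ := tube.Stats() // stats-tube fields such as "current-jobs-ready"
    fmt.Println(id, stats["current-jobs-ready"])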