Skip to content

Commit

Permalink
Merge pull request #14 from jfrog/e2e
Browse files Browse the repository at this point in the history
Adding e2e test cases
  • Loading branch information
oumkale authored Dec 14, 2023
2 parents 6a8349a + fb9aa99 commit 09c7ad7
Show file tree
Hide file tree
Showing 10 changed files with 375 additions and 0 deletions.
29 changes: 29 additions & 0 deletions tests/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# E2E testing for operators using kuttl framework

There are two ways to run the integration tests locally.
If you are writing tests, make sure to verify them locally first using one of these approaches.

## 1) Using Vcluster

### Prerequisites
* Access to an existing kubernetes cluster with kubectl installed [Refer](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/)
`brew install kubectl`
* Install Vcluster [Refer](https://www.vcluster.com/docs/getting-started/setup)
`brew install vcluster`
* Install helm [Refer](https://helm.sh/docs/intro/install/)
`brew install helm`
* Install kuttl [Refer](https://kuttl.dev/docs/cli.html#setup-the-kuttl-kubectl-plugin)
`brew tap kudobuilder/tap`
`brew install kuttl-cli`

### To run the tests
```shell
# Run tests from the operator folder
./kuttl.sh
```
To run specific tests
```shell
./kuttl.sh "--test install"
```
Note: the folder name is the test name.
You can also pass extra kuttl args using this approach.
7 changes: 7 additions & 0 deletions tests/e2e/install/00-assert.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# kuttl assert: the test step passes once the operator Deployment reports
# one ready replica. (Nesting restored — as pasted, the keys under
# `metadata`/`status` had lost their indentation.)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secretrotator-jfrog-registry-operator
status:
  replicas: 1
  readyReplicas: 1
8 changes: 8 additions & 0 deletions tests/e2e/install/00-install.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# kuttl step: install the operator chart into the test namespace.
# $NAMESPACE is injected by kuttl at run time.
# (Nesting restored — the `commands` list items had lost their indentation.)
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
  - command: echo "installing operator in $NAMESPACE"
  - command: echo "Installing"
  - command: helm upgrade --install secretrotator --set "serviceAccount.name=test" --namespace $NAMESPACE ../../../charts/jfrog-registry-operator/ -f customvalues.yaml
  # Small settle delay before the 00-assert check starts polling.
  - command: sleep 5
timeout: 600
217 changes: 217 additions & 0 deletions tests/e2e/install/customvalues.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,217 @@
# Default values for JFrog Registry Operator (e2e test overrides).
# NOTE(review): the nesting below was reconstructed from a copy of this file
# whose indentation had been flattened (which would make the document invalid
# YAML with duplicate top-level keys) — confirm against the chart's packaged
# values.yaml.
global:
  # imageRegistry: ""
  ## E.g.
  imagePullSecrets: []
  # storageClass: ""

image:
  registry: releases-docker.jfrog.io
  repository: jfrog/jfrog-registry-operator
  # Quoted so version strings can never be re-typed by tooling.
  tag: "1.0.0"
  pullPolicy: IfNotPresent
  # pullSecrets:

## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal.
## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data.
##
terminationGracePeriodSeconds: 120

## @param extraEnvironmentVariables that can be used to tune jfrog-registry-operator to your needs.
## Example:
## extraEnvironmentVariables:
##   - name: MY_ENV_VAR
##     value: ""
extraEnvironmentVariables:

## @param replicaCount Number of jfrog-registry-operator replicas to deploy
##
replicaCount: 1

## @param podLabels jfrog-registry-operator Pod labels. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}

## @param podAnnotations jfrog-registry-operator Pod annotations. Evaluated as a template
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}

## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ##
  type: ""
  ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set.
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []

## @param deploymentLabels jfrog-registry-operator deployment labels. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
deploymentLabels:
  AutomationCleanupIgnore: "true"
  control-plane: controller-manager
  app.kubernetes.io/name: namespace
  app.kubernetes.io/instance: system
  app.kubernetes.io/created-by: artifactory-secrets-rotator
  app.kubernetes.io/part-of: artifactory-secrets-rotator

## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft

## jfrog-registry-operator pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enable jfrog-registry-operator pods' Security Context
## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers
## @param podSecurityContext.runAsUser User ID for the service user running the pod
##
podSecurityContext:
  runAsNonRoot: true
  enabled: true
  fsGroup: 1065
  runAsUser: 1065

resources: {}
## Example:
## limits:
##   cpu: 1000m
##   memory: 2Gi
##
## Examples:
## requests:
##   cpu: 1000m
##   memory: 2Gi
##

## Specify common probes parameters
probes:
  timeoutSeconds: 5

## The following settings are to configure the frequency of the liveness and startup probes when splitServicesToContainers set to true
livenessProbe:
  enabled: true
  # `config` is a literal block scalar: it is passed through as raw probe
  # YAML by the chart templates.
  config: |
    httpGet:
      path: /healthz
      port: 8081
    initialDelaySeconds: 15
    periodSeconds: 20
readinessProbe:
  enabled: true
  config: |
    httpGet:
      path: /readyz
      port: 8081
    initialDelaySeconds: 5
    periodSeconds: 10
startupProbe:
  enabled: false

## jfrog-registry-operator pods ServiceAccount
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## @param serviceAccount.create Enable creation of ServiceAccount for jfrog-registry-operator pods
  ##
  create: true
  ## @param serviceAccount.name Name of the created serviceAccount
  ## If not set and create is true, a name is generated using the jfrog-registry-operator.fullname template
  ##
  name: "jfrog-operator-sa"
  ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod
  ##

  ## Example:
  annotations: |
    eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/jfrog-operator-role
  automountServiceAccountToken: true

## Role Based Access
## ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
  ## @param rbac.create Whether RBAC rules should be created
  ## binding jfrog-registry-operator ServiceAccount to a role
  ## that allows jfrog-registry-operator pods querying the K8s API
  ##
  create: true

persistence:
  ## @param persistence.enabled
  ##
  enabled: false

  ## @param persistence.storageClass PVC Storage Class for metadata data volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClass: ""
  ## @param persistence.selector Selector to match an existing Persistent Volume
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
  ## @param persistence.accessMode PVC Access Mode for metadata data volume
  ##
  accessMode: ReadWriteOnce

  ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims
  ## The value is evaluated as a template
  ## So, for example, the name can depend on .Release or .Chart
  ##
  existingClaim: ""
  ## @param persistence.mountPath The path the volume will be mounted at
  ## Note: useful when using custom metadata images
  ##
  mountPath: /var/opt/jfrog/jfrog-registry-operator
  ## @param persistence.subPath The subdirectory of the volume to mount to
  ## Useful in dev environments and one PV for multiple services
  ##
  subPath: ""
  ## @param persistence.size PVC Storage Request for metadata data volume
  ## If you change this value, you might have to adjust `metadata.diskFreeLimit` as well
  ##
  size: 8Gi

  ## @param persistence.volumes Additional volumes without creating PVC
  ##   - name: volume_name
  ##     emptyDir: {}
  ##
  volumes: []
  ## @param persistence.annotations Persistence annotations. Evaluated as a template
  ## Example:
  ## annotations:
  ##   example.io/disk-volume-type: SSD
  ##
  annotations: {}

initContainers:
  image:
    registry: releases-docker.jfrog.io
    repository: ubi9/ubi-minimal
    tag: "9.2.717"
    pullPolicy: IfNotPresent
    # NOTE(review): pullSecrets placed under `image` to mirror the commented
    # `# pullSecrets:` sibling of pullPolicy in the top-level image block —
    # confirm against the chart's values schema.
    pullSecrets: []
67 changes: 67 additions & 0 deletions tests/e2e/kuttl-e2e.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Runs the kuttl e2e suite, either inside a disposable Vcluster (default)
# or a kind cluster (when START_KIND=true).
#
# Usage: kuttl-e2e.sh [kuttl-args] [ci]
#   $1  extra args forwarded to `kubectl kuttl test`, e.g. "--test install"
#       (deliberately expanded unquoted below so a space-separated string
#        becomes multiple kuttl arguments)
#   $2  "ci" to install vcluster from the helm chart instead of the CLI
#
# Environment:
#   START_KIND        "true" => run against kind instead of Vcluster
#   DEBUG_TESTS       "true" => keep cluster/resources after the run
#   VCLUSTER_VERSION  vcluster chart version (ci mode only)

if [[ "$START_KIND" != "true" ]]; then
    # Unique, timestamped namespace so concurrent runs on the same host
    # cluster cannot collide.
    time=$(date +%Y%m%d%H%M%S)
    kubectl create ns "e2e-${time}" || true
    echo "Using namespace: e2e-${time}"
    export NAMESPACE="e2e-${time}"
    echo "namespace: $NAMESPACE"

    if [[ "$2" == "ci" ]]; then
        helm upgrade --install "e2e-${time}" entplus/vcluster --version "${VCLUSTER_VERSION}" --namespace "e2e-${time}" -f ../vcluster-values.yaml
        vcluster connect "e2e-${time}" &
        vClusterPid=$!
    else
        vcluster create "e2e-${time}" -n "e2e-${time}" -f ../vcluster-values.yaml &
        vClusterPid=$!
    fi

    # Wait (up to 20 polls x 5s ~= 100s) for the Vcluster to report Running
    # and for kubectl's current context to point at it.
    ready=false
    wait_period=0
    while [[ "$ready" = false ]]; do
        wait_period=$((wait_period + 1))
        sleep 5
        if [[ "$wait_period" -gt 20 ]]; then
            echo "Timed out waiting for vcluster to come up"
            # Was a bare `exit`, which reported SUCCESS on timeout.
            exit 1
        fi
        echo "Waiting for vcluster to be ready"
        clusters=$(vcluster list --output json)
        for cluster in $(echo "${clusters}" | jq -r -c '.[]'); do
            vClusterName=$(echo "$cluster" | jq -r '.Name')
            vClusterStatus=$(echo "$cluster" | jq -r '.Status')
            currentContext=$(kubectl config current-context)
            if [[ "$vClusterName" == "e2e-${time}" ]] && [[ "$vClusterStatus" == "Running" ]] && [[ "$currentContext" == *"e2e-$time"* ]]; then
                ready=true
            fi
        done
    done

    # Give the API server inside the vcluster a moment to settle.
    sleep 10

    # Run integration tests using kuttl; capture the exit code so cleanup
    # and report printing below cannot mask a test failure.
    echo "Running integration tests"
    if [[ "$DEBUG_TESTS" == "true" ]]; then
        kubectl kuttl test --report xml --skip-delete $1
        testExit=$?
    else
        kubectl kuttl test --report xml $1
        testExit=$?
    fi

    if [[ "$DEBUG_TESTS" != "true" ]]; then
        echo "Deleting Vcluster.."
        vcluster delete "e2e-${time}"
        kill -9 "$vClusterPid"
        kubectl delete ns "e2e-${time}"
    fi

else
    kubectl kuttl test --start-kind --parallel 3 --report xml $1
    testExit=$?
fi

# Kuttl reports
echo "******************TEST RESULT*******************"
cat kuttl-report.xml
echo "*********************END************************"

# Propagate kuttl's result; previously the script always exited with the
# status of the final echo, so CI could not detect test failures.
exit "$testExit"
3 changes: 3 additions & 0 deletions tests/e2e/kuttl-report.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
<testsuites name="" tests="0" failures="0" time="71.191">
<testsuite tests="0" failures="0" timestamp="2023-11-21T18:11:58.199235+05:30" time="0.000" name="../e2e"></testsuite>
</testsuites>
12 changes: 12 additions & 0 deletions tests/e2e/kuttl-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# kuttl TestSuite: runs every test directory under ../e2e against the
# current kubeconfig context (startKIND is false; the kuttl-e2e.sh wrapper
# provisions the cluster). (Nesting restored — list items had lost their
# indentation.)
apiVersion: kuttl.dev/v1beta1
kind: TestSuite
startKIND: false
name: e2e
testDirs:
  - ../e2e
commands:
  - command: chmod 600 kubeconfig
  - command: helm repo add jfrog https://charts.jfrog.io
  # - command: helm repo add dev https://entplus.jfrog.io/artifactory/helm-releases-local --username ${int_entplus_deployer_user} --password ${int_entplus_deployer_apikey}
  - command: helm dep up ../../charts/jfrog-registry-operator
timeout: 1200
21 changes: 21 additions & 0 deletions tests/scripts/checkFileExistsInPod.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Waits until a file exists inside the first pod matching a label, then
# waits for the owning workload's rollout to complete.
#
# Args:
#   $1 type          workload kind for `kubectl rollout status` (deployment|statefulset|...)
#   $2 name          value of the app.kubernetes.io/name label AND workload name
#   $3 containerName container to exec into
#   $4 mountPath     absolute file path to check inside the container
# Environment:
#   NAMESPACE        namespace to query
#
# NOTE(review): the loop has no bound of its own; it relies on the caller
# (e.g. the kuttl step timeout) to abort a run where the file never appears.

type=$1
name=$2
containerName=$3
mountPath=$4

assert=false
while [[ "$assert" == "false" ]]; do
  podName=$(kubectl get pods -l app.kubernetes.io/name="$name" -o=jsonpath='{.items[0].metadata.name}' -n "$NAMESPACE" 2> /dev/null)
  # No -it: this runs non-interactively (CI has no TTY for -t, and no stdin
  # is needed).
  fileExists=$(kubectl exec "$podName" -c "$containerName" -n "$NAMESPACE" -- bash -c "if [[ -f $mountPath ]] ; then echo true; else echo false ; fi" 2> /dev/null)
  echo "Expected $name $mountPath to be present"
  # Must compare against the literal string "true": the original
  # `[[ $fileExists ]]` was truthy for ANY non-empty output, including the
  # string "false", so the check always passed. The pattern match also
  # tolerates a trailing CR in remote exec output.
  if [[ "$fileExists" == *"true"* ]]; then
    assert=true
    echo "$name container $mountPath check is successful"
    break
  fi
  sleep 10
done
kubectl rollout status "$type"/"$name" -n "$NAMESPACE"

9 changes: 9 additions & 0 deletions tests/scripts/checkPodsAreReady.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Blocks until no pod in $NAMESPACE reports a Ready condition of "False",
# then sleeps briefly to let the cluster settle.
#
# Environment:
#   NAMESPACE  namespace to poll
#
# NOTE(review): an empty namespace (no pods yet) produces no "False" in the
# jsonpath output, so this exits immediately — callers should only invoke it
# after workloads have been created.

while [[ $(kubectl get pods -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}' -n "$NAMESPACE") == *"False"* ]]; do
  echo "waiting for all pods to be in ready state" && sleep 1
done
echo "All pods are ready"
sleep 5

2 changes: 2 additions & 0 deletions tests/vcluster-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Vcluster chart overrides used by the e2e runs: pin the CoreDNS image.
# (Nesting restored — `image` belongs under `coredns`.)
coredns:
  image: coredns/coredns:1.10.1

0 comments on commit 09c7ad7

Please sign in to comment.