Utilize the DruidIngestion controller in e2e tests #146

Merged

7 changes: 6 additions & 1 deletion Makefile
@@ -92,7 +92,12 @@ docker-push-local-test: ## Push docker image with the manager to kind registry.
.PHONY: deploy-testjob
deploy-testjob: ## Run a wikipedia test pod
	kubectl create job wiki-test --image=${IMG_KIND}:${TEST_IMG_TAG} -- sh /wikipedia-test.sh
	bash e2e/monitor-task.sh
	JOB_ID="wiki-test" bash e2e/monitor-task.sh

.PHONY: deploy-testingestionjob
deploy-testingestionjob: ## Wait for the Druid ingestion task to complete and then verify the dataset
	kubectl create job ingestion-test --image=${IMG_KIND}:${TEST_IMG_TAG} -- sh /druid-ingestion-test.sh ${TASK_ID}
	JOB_ID="ingestion-test" bash e2e/monitor-task.sh

.PHONY: helm-install-druid-operator
helm-install-druid-operator: ## Helm install to deploy the druid operator
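As a usage note on the Makefile change above: the new target expects the Druid task ID to be passed as a make variable, mirroring how e2e/e2e.sh invokes it further down. A minimal sketch with a placeholder value:

  # Hypothetical manual invocation; TASK_ID normally comes from the DruidIngestion status
  make deploy-testingestionjob TASK_ID="index_parallel_wikipedia-2_example"
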
40 changes: 40 additions & 0 deletions chart/templates/rbac_manager.yaml
@@ -126,6 +126,26 @@ rules:
  - get
  - patch
  - update
- apiGroups:
  - druid.apache.org
  resources:
  - druidingestions
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - druid.apache.org
  resources:
  - druidingestions/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - networking.k8s.io
  resources:
@@ -284,6 +304,26 @@ rules:
  - get
  - patch
  - update
- apiGroups:
  - druid.apache.org
  resources:
  - druidingestions
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - druid.apache.org
  resources:
  - druidingestions/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - networking.k8s.io
  resources:
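These rules grant the operator the same access to DruidIngestion resources (and their status subresource) in both role definitions of the chart. One way to sanity-check them after installing the chart is kubectl auth can-i; the namespace and service account below are assumptions and depend on the Helm release:

  # Hypothetical spot-check; substitute the operator's real namespace and service account
  kubectl auth can-i create druidingestions.druid.apache.org \
    --as=system:serviceaccount:druid-operator:druid-operator
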
4 changes: 2 additions & 2 deletions controllers/ingestion/reconciler.go
@@ -345,8 +345,8 @@ func (r *DruidIngestionReconciler) getRouterSvcUrl(namespace, druidClusterName s
if svcName == "" {
return "", errors.New("router svc discovery fail")
}
// newName := "http://" + svcName + "." + namespace + ".svc.cluster.local:" + DruidRouterPort
newName := "http://localhost:" + DruidRouterPort

newName := "http://" + svcName + "." + namespace + ".svc.cluster.local:" + DruidRouterPort

return newName, nil
}
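With this change the ingestion reconciler reaches the router through its in-cluster Service DNS name rather than a port-forwarded localhost, which is what allows it to run inside the e2e kind cluster. As an illustration only (service name and port taken from the e2e scripts in this PR), the resulting URL can be probed with Druid's generic /status endpoint:

  # Illustration of the URL shape getRouterSvcUrl now returns for the e2e tiny-cluster
  curl -s http://druid-tiny-cluster-routers.druid.svc.cluster.local:8088/status
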
1 change: 1 addition & 0 deletions e2e/Dockerfile-testpod
@@ -4,3 +4,4 @@ RUN apk add --update-cache \
&& rm -rf /var/cache/apk/*

ADD e2e/wikipedia-test.sh .
ADD e2e/druid-ingestion-test.sh .
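The test-pod image now ships both test scripts. A hedged sketch of building it locally from the repo root; the tag below is a placeholder, and in the e2e flow the image is referenced as ${IMG_KIND}:${TEST_IMG_TAG} by the Makefile targets above:

  # Assumed local build command; the image tag is a placeholder
  docker build -f e2e/Dockerfile-testpod -t druid-testpod:local .
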
73 changes: 73 additions & 0 deletions e2e/configs/druid-ingestion-cr.yaml
@@ -0,0 +1,73 @@
apiVersion: druid.apache.org/v1alpha1
kind: DruidIngestion
metadata:
  labels:
    app.kubernetes.io/name: druidingestion
    app.kubernetes.io/instance: druidingestion-sample
  name: wikipedia-ingestion
spec:
  suspend: false
  druidCluster: tiny-cluster
  ingestion:
    type: native-batch
    spec: |-
      {
        "type" : "index_parallel",
        "spec" : {
          "dataSchema" : {
            "dataSource" : "wikipedia-2",
            "timestampSpec": {
              "column": "time",
              "format": "iso"
            },
            "dimensionsSpec" : {
              "dimensions" : [
                "channel",
                "cityName",
                "comment",
                "countryIsoCode",
                "countryName",
                "isAnonymous",
                "isMinor",
                "isNew",
                "isRobot",
                "isUnpatrolled",
                "metroCode",
                "namespace",
                "page",
                "regionIsoCode",
                "regionName",
                "user",
                { "name": "added", "type": "long" },
                { "name": "deleted", "type": "long" },
                { "name": "delta", "type": "long" }
              ]
            },
            "metricsSpec" : [],
            "granularitySpec" : {
              "type" : "uniform",
              "segmentGranularity" : "day",
              "queryGranularity" : "none",
              "intervals" : ["2015-09-12/2015-09-13"],
              "rollup" : false
            }
          },
          "ioConfig" : {
            "type" : "index_parallel",
            "inputSource" : {
              "type" : "local",
              "baseDir" : "quickstart/tutorial/",
              "filter" : "wikiticker-2015-09-12-sampled.json.gz"
            },
            "inputFormat" : {
              "type" : "json"
            },
            "appendToExisting" : false
          },
          "tuningConfig" : {
            "type" : "index_parallel",
            "maxRowsPerSegment" : 5000000,
            "maxRowsInMemory" : 25000
          }
        }
      }
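Once this CR is applied, the operator submits the native-batch task to Druid and records the task ID in the resource status, which is the field the e2e flow reads below. A sketch of checking it by hand, using the same namespace and go-template as e2e/e2e.sh:

  # Sketch: apply the CR and read back the submitted task ID
  kubectl apply -f e2e/configs/druid-ingestion-cr.yaml -n druid
  kubectl get druidingestion -n druid wikipedia-ingestion --template={{.status.taskId}}
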
40 changes: 40 additions & 0 deletions e2e/druid-ingestion-test.sh
@@ -0,0 +1,40 @@
#!/bin/sh

set -e

TASK_ID=$1

echo "Checking Status for task $TASK_ID..."
STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/${TASK_ID}/status | jq '.status.status' -r);
while [ $STATUS == "RUNNING" ]
do
sleep 8;
echo "TASK is "$STATUS "..."
STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/${TASK_ID}/status | jq '.status.status' -r)
done

if [ $STATUS == "SUCCESS" ]
then
echo "TASK $TASK_ID COMPLETED SUCCESSFULLY"
sleep 60 # need time for the segments to become queryable
else
echo "TASK $TASK_ID FAILED !!!!"
exit 1
fi

echo "Querying Data ... "
echo "Running query SELECT COUNT(*) AS \"Count\" FROM \"wikipedia-2\" WHERE isMinor = 'false'"

cat > query.json <<EOF
{"query":"SELECT COUNT(*) AS \"Count\" FROM \"wikipedia-2\" WHERE isMinor = 'false'","resultFormat":"objectlines"}
EOF

count=`curl -s -XPOST -H'Content-Type: application/json' http://druid-tiny-cluster-routers.druid.svc:8088/druid/v2/sql -d @query.json| jq '.Count'`
echo "count is $count"
if [ $count != "21936" ]
then
echo "Query failed !!!"
exit 1
else
echo "Query Successful !!!"
fi
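Because the query is posted with resultFormat set to objectlines, the router returns one JSON object per result row, e.g. {"Count":21936}, which is why a bare jq '.Count' is enough to extract the value. A sketch of rerunning the same check by hand from inside the cluster, reusing the query.json written by the script:

  # Rerun the SQL check manually; expected output for the sample dataset is 21936
  curl -s -XPOST -H'Content-Type: application/json' \
    http://druid-tiny-cluster-routers.druid.svc:8088/druid/v2/sql -d @query.json | jq '.Count'
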
8 changes: 8 additions & 0 deletions e2e/e2e.sh
@@ -56,6 +56,14 @@ done
# Running test job with an example dataset
make deploy-testjob

# Apply a test DruidIngestion resource and wait for the ingestion task to be submitted
kubectl apply -f e2e/configs/druid-ingestion-cr.yaml -n ${NAMESPACE}
sleep 30 # wait for the manager to submit the ingestion task

# get the ingestion task ID and launch the monitoring job
taskId=`kubectl get druidingestion -n druid wikipedia-ingestion --template={{.status.taskId}}`
make deploy-testingestionjob TASK_ID=$taskId

# Delete old druid
kubectl delete -f e2e/configs/druid-cr.yaml -n ${NAMESPACE}
for d in $(kubectl get pods -n ${NAMESPACE} -l app=druid -l druid_cr=tiny-cluster -o name)
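The fixed sleep 30 above assumes the manager has written .status.taskId within half a minute. A hedged alternative, not part of this change, would be to poll for the field instead; the "<no value>" check matches the go-template behaviour already relied on in e2e/monitor-task.sh:

  # Hypothetical polling loop instead of the fixed sleep
  taskId=""
  while [ -z "$taskId" ] || [ "$taskId" = "<no value>" ]
  do
    sleep 5
    taskId=$(kubectl get druidingestion -n druid wikipedia-ingestion --template={{.status.taskId}} 2>/dev/null || true)
  done
  make deploy-testingestionjob TASK_ID=$taskId
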
4 changes: 2 additions & 2 deletions e2e/monitor-task.sh
@@ -2,11 +2,11 @@
#!/bin/sh
set -e
echo "---------------"
echo "Checking the status of running job ..."
echo "Checking the status of running job $JOB_ID ..."
for (( i=0; i<=9; i++ ))
do
sleep 60
STAT=`kubectl get job wiki-test --template={{.status.succeeded}}`
STAT=`kubectl get job $JOB_ID --template={{.status.succeeded}}`
if [ "$STAT" == "<no value>" ]
then
echo "Seems to be in progress ..."
1 change: 1 addition & 0 deletions e2e/wikipedia-test.sh
@@ -21,6 +21,7 @@ done
if [ $STATUS == "SUCCESS" ]
then
echo "TASK $task_id COMPLETED SUCCESSFULLY"
sleep 60 # need time for the segments to become queryable
else
echo "TASK $task_id FAILED !!!!"
fi