From 840a9711a17ba1d6cb594dbf392d9cd03bab959a Mon Sep 17 00:00:00 2001 From: Aditya Thebe Date: Wed, 1 Nov 2023 07:49:55 +0545 Subject: [PATCH] chore: migrate webhook test from bash to go * rename postgres push test to operater e2e test --- ...res-and-push-test.yml => e2e-operator.yml} | 4 +- .github/workflows/webhook-check-test.yml | 43 ---- pkg/api/suite_test.go | 90 ++++++++ pkg/api/webhook_test.go | 194 ++++++++++++++++++ .../{e2e-postgres-push.sh => e2e-operator.sh} | 0 test/e2e-webook.sh | 166 --------------- 6 files changed, 286 insertions(+), 211 deletions(-) rename .github/workflows/{postgres-and-push-test.yml => e2e-operator.yml} (93%) delete mode 100644 .github/workflows/webhook-check-test.yml create mode 100644 pkg/api/suite_test.go create mode 100644 pkg/api/webhook_test.go rename test/{e2e-postgres-push.sh => e2e-operator.sh} (100%) delete mode 100755 test/e2e-webook.sh diff --git a/.github/workflows/postgres-and-push-test.yml b/.github/workflows/e2e-operator.yml similarity index 93% rename from .github/workflows/postgres-and-push-test.yml rename to .github/workflows/e2e-operator.yml index 8047b3443..5910701c9 100644 --- a/.github/workflows/postgres-and-push-test.yml +++ b/.github/workflows/e2e-operator.yml @@ -17,7 +17,7 @@ on: - "**.yaml" - "**.yml" - "test/**" -name: Postgres-and-Push-Test +name: Operator E2E Test permissions: contents: read jobs: @@ -41,4 +41,4 @@ jobs: cache- - run: make bin - name: Test - run: ./test/e2e-postgres-push.sh + run: ./test/e2e-operator.sh diff --git a/.github/workflows/webhook-check-test.yml b/.github/workflows/webhook-check-test.yml deleted file mode 100644 index d485035f5..000000000 --- a/.github/workflows/webhook-check-test.yml +++ /dev/null @@ -1,43 +0,0 @@ -on: - push: - tags: - - v* - branches: - - master - paths: - - "**.go" - - "Makefile" - - "**.yaml" - - "**.yml" - - "test/**" - pull_request: - paths: - - "**.go" - - "Makefile" - - "**.yaml" - - "**.yml" - - "test/**" -name: Webhook Check Test -permissions: - contents: read -jobs: - test: - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 - with: - go-version: 1.20.x - - name: Checkout code - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - - uses: actions/cache@8492260343ad570701412c2f464a5877dc76bace # v2 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - .bin - key: cache-${{ hashFiles('**/go.sum') }}-${{ hashFiles('.bin/*') }} - restore-keys: | - cache- - - name: Test - run: ./test/e2e-webook.sh diff --git a/pkg/api/suite_test.go b/pkg/api/suite_test.go new file mode 100644 index 000000000..a50d178d5 --- /dev/null +++ b/pkg/api/suite_test.go @@ -0,0 +1,90 @@ +package api_test + +import ( + gocontext "context" + "fmt" + "net/http" + "testing" + + embeddedPG "github.com/fergusstrange/embedded-postgres" + apiContext "github.com/flanksource/canary-checker/api/context" + "github.com/flanksource/canary-checker/pkg/api" + "github.com/flanksource/canary-checker/pkg/cache" + "github.com/flanksource/canary-checker/pkg/db" + "github.com/flanksource/commons/logger" + "github.com/flanksource/duty" + "github.com/flanksource/duty/context" + "github.com/flanksource/duty/testutils" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/labstack/echo/v4" + "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "gorm.io/gorm" +) + +var ( + testEchoServer *echo.Echo + testEchoServerPort = 9232 + dbPort = 9999 + ctx context.Context + + testDB *gorm.DB + testPool *pgxpool.Pool + + postgresServer *embeddedPG.EmbeddedPostgres +) + +func TestAPI(t *testing.T) { + RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "API Tests") +} + +var _ = ginkgo.BeforeSuite(func() { + var err error + + config, dbString := testutils.GetEmbeddedPGConfig("test_canary_job", dbPort) + postgresServer = embeddedPG.NewDatabase(config) + if err = postgresServer.Start(); err != nil { + ginkgo.Fail(err.Error()) + } + logger.Infof("Started postgres on port: %d", dbPort) + + if testDB, testPool, err = duty.SetupDB(dbString, nil); err != nil { + ginkgo.Fail(err.Error()) + } + cache.PostgresCache = cache.NewPostgresCache(testPool) + + // Set this because some functions directly use db.Gorm + db.Gorm = testDB + db.Pool = testPool + + ctx = context.NewContext(gocontext.Background()).WithDB(testDB, testPool) + apiContext.DefaultContext = ctx + + testEchoServer = echo.New() + testEchoServer.POST("/webhook/:id", api.WebhookHandler) + listenAddr := fmt.Sprintf(":%d", testEchoServerPort) + + go func() { + defer ginkgo.GinkgoRecover() // Required by ginkgo, if an assertion is made in a goroutine. + if err := testEchoServer.Start(listenAddr); err != nil { + if err == http.ErrServerClosed { + logger.Infof("Server closed") + } else { + ginkgo.Fail(fmt.Sprintf("Failed to start test server: %v", err)) + } + } + }() +}) + +var _ = ginkgo.AfterSuite(func() { + logger.Infof("Stopping test echo server") + if err := testEchoServer.Shutdown(gocontext.Background()); err != nil { + ginkgo.Fail(err.Error()) + } + + logger.Infof("Stopping postgres") + if err := postgresServer.Stop(); err != nil { + ginkgo.Fail(err.Error()) + } +}) diff --git a/pkg/api/webhook_test.go b/pkg/api/webhook_test.go new file mode 100644 index 000000000..d1dd1c8c6 --- /dev/null +++ b/pkg/api/webhook_test.go @@ -0,0 +1,194 @@ +package api_test + +import ( + "encoding/json" + "fmt" + netHTTP "net/http" + "time" + + v1 "github.com/flanksource/canary-checker/api/v1" + "github.com/flanksource/canary-checker/checks" + "github.com/flanksource/canary-checker/pkg/db" + canaryJobs "github.com/flanksource/canary-checker/pkg/jobs/canary" + "github.com/flanksource/commons/http" + "github.com/flanksource/duty/job" + "github.com/flanksource/duty/models" + "github.com/flanksource/duty/types" + "github.com/google/uuid" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = ginkgo.Describe("Test Sync Canary Job", ginkgo.Ordered, func() { + canarySpec := v1.CanarySpec{ + Schedule: "@every 1s", + Webhook: &v1.WebhookCheck{ + Description: v1.Description{ + Name: "my-webhook", + }, + Templatable: v1.Templatable{ + Transform: v1.Template{ + Expression: ` + results.json.alerts.map(r, + { + 'name': r.name + r.fingerprint, + 'labels': r.labels, + 'icon': 'alert', + 'message': r.annotations.summary, + 'description': r.annotations.description, + 'deletedAt': has(r.endsAt) ? 
r.endsAt : null, + } + ).toJSON()`, + }, + }, + Token: &types.EnvVar{ + ValueStatic: "my-token", + }, + }, + } + + var canaryM *models.Canary + client := http.NewClient().BaseURL(fmt.Sprintf("http://localhost:%d", testEchoServerPort)).Header("Content-Type", "application/json") + + ginkgo.It("should save a canary spec", func() { + b, err := json.Marshal(canarySpec) + Expect(err).To(BeNil()) + + var spec types.JSON + err = json.Unmarshal(b, &spec) + Expect(err).To(BeNil()) + + canaryM = &models.Canary{ + ID: uuid.New(), + Spec: spec, + Name: "alert-manager-canary", + } + err = testDB.Create(canaryM).Error + Expect(err).To(BeNil()) + + response, err := db.GetAllCanariesForSync(ctx, "") + Expect(err).To(BeNil()) + Expect(len(response)).To(Equal(1)) + }) + + ginkgo.It("schedule the canary job", func() { + canaryJobs.CanaryScheduler.Start() + jobCtx := job.JobRuntime{ + Context: ctx, + } + + err := canaryJobs.SyncCanaryJobs(jobCtx) + Expect(err).To(BeNil()) + }) + + ginkgo.It("Should have created the webhook check", func() { + var count = 30 + for { + time.Sleep(time.Second) // Wait for SyncCanaryJob to create the check + count-- + + var checks []models.Check + err := ctx.DB().Where("name = ?", canarySpec.Webhook.Name).Find(&checks).Error + Expect(err).To(BeNil()) + + if len(checks) == 1 { + break + } + + if len(checks) != 1 && count <= 0 { + ginkgo.Fail("expected check to be created") + } + } + }) + + ginkgo.It("Should forbid when webhook is called without the auth token", func() { + resp, err := client.R(ctx).Post(fmt.Sprintf("/webhook/%s", canarySpec.Webhook.Name), nil) + Expect(err).To(BeNil()) + Expect(resp.StatusCode).To(Equal(netHTTP.StatusUnauthorized)) + }) + + ginkgo.It("Should allow when webhook is called with the auth token", func() { + body := `{ + "version": "4", + "status": "firing", + "alerts": [ + { + "status": "firing", + "name": "first", + "labels": { + "severity": "critical", + "alertName": "ServerDown", + "location": "DataCenterA" + }, + "annotations": { + "summary": "Server in DataCenterA is down", + "description": "This alert indicates that a server in DataCenterA is currently down." + }, + "startsAt": "2023-10-30T08:00:00Z", + "generatorURL": "http://example.com/generatorURL/serverdown", + "fingerprint": "a1b2c3d4e5f6" + }, + { + "status": "resolved", + "labels": { + "severity": "warning", + "alertName": "HighCPUUsage", + "location": "DataCenterB" + }, + "annotations": { + "summary": "High CPU Usage in DataCenterB", + "description": "This alert indicates that there was high CPU usage in DataCenterB, but it is now resolved." 
+ }, + "startsAt": "2023-10-30T09:00:00Z", + "generatorURL": "http://example.com/generatorURL/highcpuusage", + "name": "second", + "fingerprint": "x1y2z3w4v5" + } + ] +}` + resp, err := client.R(ctx).Post(fmt.Sprintf("/webhook/%s?token=%s", canarySpec.Webhook.Name, canarySpec.Webhook.Token.ValueStatic), body) + Expect(err).To(BeNil()) + Expect(resp.StatusCode).To(Equal(netHTTP.StatusOK)) + }) + + ginkgo.It("Should have created 2 new checks from the webhook", func() { + var result []models.Check + err := testDB.Where("type = ?", checks.WebhookCheckType).Where("name != ?", canarySpec.Webhook.Name).Find(&result).Error + Expect(err).To(BeNil()) + Expect(len(result)).To(Equal(2)) + }) + + ginkgo.It("Should have deleted one resolved alert from", func() { + body := `{ + "version": "4", + "status": "firing", + "alerts": [ + { + "status": "firing", + "name": "first", + "labels": { + "severity": "critical", + "alertName": "ServerDown", + "location": "DataCenterA" + }, + "annotations": { + "summary": "Server in DataCenterA is down", + "description": "This alert indicates that a server in DataCenterA is currently down." + }, + "startsAt": "2023-10-30T08:00:00Z", + "generatorURL": "http://example.com/generatorURL/serverdown", + "fingerprint": "a1b2c3d4e5f6", + "endsAt": "2023-10-30T09:15:00Z" + } + ] + }` + resp, err := client.R(ctx).Post(fmt.Sprintf("/webhook/%s?token=%s", canarySpec.Webhook.Name, canarySpec.Webhook.Token.ValueStatic), body) + Expect(err).To(BeNil()) + Expect(resp.StatusCode).To(Equal(netHTTP.StatusOK)) + + var result models.Check + err = testDB.Where("name = 'firsta1b2c3d4e5f6'").Find(&result).Error + Expect(err).To(BeNil()) + Expect(result.DeletedAt).To(Not(BeNil())) + }) +}) diff --git a/test/e2e-postgres-push.sh b/test/e2e-operator.sh similarity index 100% rename from test/e2e-postgres-push.sh rename to test/e2e-operator.sh diff --git a/test/e2e-webook.sh b/test/e2e-webook.sh deleted file mode 100755 index a4b7c7019..000000000 --- a/test/e2e-webook.sh +++ /dev/null @@ -1,166 +0,0 @@ -#!/bin/bash - -set -e - -echo "::group::Prerequisites" -required_tools=("tr" "docker" "curl") -for tool in "${required_tools[@]}"; do - if ! command -v $tool &>/dev/null; then - echo "$tool is not installed. Please install it to run this script." - exit 1 - fi -done -echo "All the required tools are installed." -echo "::endgroup::" - -# https://cedwards.xyz/defer-for-shell/ -DEFER= -defer() { - DEFER="$*; ${DEFER}" - trap "{ $DEFER }" EXIT -} - -port=7676 -container_name=webhook_postgres - -## Summary -# - Fire up the canary checker HTTP server for webhook endpoint -# - Expect the checks to be created -# - Create resolved alert -# - Expect the checks to be deleted - -echo "::group::Provisioning" -echo "Starting up postgres database" -docker run --rm -p 5433:5432 --name $container_name -e POSTGRES_PASSWORD=mysecretpassword -d postgres:14 -defer docker container rm -f $container_name - -echo "Starting canary-checker in the background" -go run main.go serve --httpPort=$port \ - --db-migrations \ - --disable-postgrest -vvv \ - --db='postgres://postgres:mysecretpassword@localhost:5433/postgres?sslmode=disable' \ - --maxStatusCheckCount=1 \ - fixtures/external/alertmanager.yaml &>/dev/null & -PROC_ID=$! -echo "Started canary checker with PID $PROC_ID" - -timeout=30 -echo Waiting for the server to come up. timeout=$timeout seconds -for ((i = 1; i <= $timeout; i++)); do - if [ $(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$port/health") == "200" ]; then - echo "Server healthy (HTTP 200 OK)." 
- break - fi - - [ $i -eq $timeout ] && echo "Timeout: Server didn't return HTTP 200." && exit 1 - sleep 1 -done - -# Not sure why killing PROC_ID doesn't kill the HTTP server. -# So had to get the process id this way -process_id=$(lsof -nti:$port) -echo "Running on port $process_id" -defer "kill -9 $process_id" -echo "::endgroup::" - -echo "::group::Assertion" -echo Expect the check to be created by sync job -resp=$(docker exec $container_name psql "postgres://postgres:mysecretpassword@localhost:5432/postgres?sslmode=disable" -t -c "SELECT count(*) FROM checks WHERE name = 'my-webhook';" | tr -d '[:space:]') -if [ $resp -ne 1 ]; then - echo "Expected one webhook check to be created but $resp were created" - exit 1 -fi - -echo Attempt to call the webhook endpoint without the auth token -resp=$(curl -w "%{http_code}" -s -o /dev/null -X POST "localhost:$port/webhook/my-webhook") -if [ $resp -ne 401 ]; then - echo "Expected 401, got $resp" - exit 1 -fi - -echo Attempt to call the webhook endpoint with the auth token -resp=$(curl -w "%{http_code}" -s -o /dev/null -X POST "localhost:$port/webhook/my-webhook?token=webhook-auth-token") -if [ $resp -ne 200 ]; then - echo "Expected 200, got $resp" - exit 1 -fi - -echo Calling webhook endpoint with unresolved alert -curl -sL -o /dev/null -X POST -u 'admin@local:admin' --header "Content-Type: application/json" --data '{ - "version": "4", - "status": "firing", - "alerts": [ - { - "status": "firing", - "name": "first", - "labels": { - "severity": "critical", - "alertName": "ServerDown", - "location": "DataCenterA" - }, - "annotations": { - "summary": "Server in DataCenterA is down", - "description": "This alert indicates that a server in DataCenterA is currently down." - }, - "startsAt": "2023-10-30T08:00:00Z", - "generatorURL": "http://example.com/generatorURL/serverdown", - "fingerprint": "a1b2c3d4e5f6" - }, - { - "status": "resolved", - "labels": { - "severity": "warning", - "alertName": "HighCPUUsage", - "location": "DataCenterB" - }, - "annotations": { - "summary": "High CPU Usage in DataCenterB", - "description": "This alert indicates that there was high CPU usage in DataCenterB, but it is now resolved." - }, - "startsAt": "2023-10-30T09:00:00Z", - "generatorURL": "http://example.com/generatorURL/highcpuusage", - "name": "second", - "fingerprint": "x1y2z3w4v5" - } - ] -}' localhost:$port/webhook/my-webhook?token=webhook-auth-token - -resp=$(docker exec $container_name psql 'postgres://postgres:mysecretpassword@localhost:5432/postgres?sslmode=disable' -t -c "SELECT count(*) FROM checks WHERE type = 'webhook' AND deleted_at IS NULL;" | tr -d '[:space:]') -if [ $resp -ne 3 ]; then - echo "Expected 2 new checks to be created but $resp were found" - exit 1 -fi - -echo Calling webhook endpoint with a resolved alert -curl -sL -o /dev/null -X POST -u 'admin@local:admin' --header "Content-Type: application/json" --data '{ - "version": "4", - "status": "firing", - "alerts": [ - { - "status": "firing", - "name": "first", - "labels": { - "severity": "critical", - "alertName": "ServerDown", - "location": "DataCenterA" - }, - "annotations": { - "summary": "Server in DataCenterA is down", - "description": "This alert indicates that a server in DataCenterA is currently down." 
- }, - "startsAt": "2023-10-30T08:00:00Z", - "generatorURL": "http://example.com/generatorURL/serverdown", - "fingerprint": "a1b2c3d4e5f6", - "endsAt": "2023-10-30T09:15:00Z" - } - ] -}' localhost:$port/webhook/my-webhook?token=webhook-auth-token - -resp=$(docker exec $container_name psql 'postgres://postgres:mysecretpassword@localhost:5432/postgres?sslmode=disable' -t -c "SELECT name FROM checks WHERE type = 'webhook' AND deleted_at IS NOT NULL;" | tr -d '[:space:]') -if [ "$resp" != 'firsta1b2c3d4e5f6' ]; then - echo "Expected "firsta1b2c3d4e5f6" check to be deleted." - exit 1 -fi - -echo "::endgroup::" -exit 0
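
The migrated suite can be exercised on its own with standard Go tooling; a minimal sketch (the repo's Makefile may wrap this differently, and embedded-postgres downloads a PostgreSQL binary on first run, so network access is assumed):

    # Run only the new API webhook tests.
    # suite_test.go binds port 9232 for the echo server and 9999 for embedded postgres.
    go test ./pkg/api -run TestAPI -v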