From bda02b13cab950f12a2fc2ee5e397f2dda6a8bf3 Mon Sep 17 00:00:00 2001
From: Amanuel Engeda
Date: Tue, 11 Jul 2023 13:56:58 -0700
Subject: [PATCH] fix SG and OIDC leak

---
 .github/actions/e2e/cleanup/action.yaml |  69 ++++++++-----
 .github/workflows/e2e.yaml              | 126 +++++++++++------
 .github/workflows/sweeper.yaml          |   6 +-
 test/hack/cleanup/go.mod                |   1 +
 test/hack/cleanup/go.sum                |   2 +
 test/hack/cleanup/main.go               | 131 ++++++++++++++++++++++--
 test/suites/drift/suite_test.go         |   4 +
 7 files changed, 239 insertions(+), 100 deletions(-)

diff --git a/.github/actions/e2e/cleanup/action.yaml b/.github/actions/e2e/cleanup/action.yaml
index 84bc1ea8cb42..54e9b835de3c 100644
--- a/.github/actions/e2e/cleanup/action.yaml
+++ b/.github/actions/e2e/cleanup/action.yaml
@@ -27,32 +27,45 @@ runs:
     - uses: actions/checkout@v3
       with:
        ref: ${{ inputs.git_ref }}
-    - uses: ./.github/actions/e2e/install-eksctl
-      with:
-        eksctl_version: v0.147.0
-    - name: delete-cluster
-      shell: bash
-      run: |
-        eksctl delete cluster --name ${{ inputs.cluster_name }} --timeout 60m --wait || true
-    - name: delete-iam-policies-stack
-      shell: bash
-      run: |
-        aws cloudformation delete-stack --stack-name iam-${{ inputs.cluster_name }}
-        aws cloudformation wait stack-delete-complete --stack-name iam-${{ inputs.cluster_name }}
-    - name: delete-cluster-stack
-      shell: bash
-      run: |
-        aws cloudformation delete-stack --stack-name eksctl-${{ inputs.cluster_name }}-cluster || true
-        aws cloudformation wait stack-delete-complete --stack-name eksctl-${{ inputs.cluster_name }}-cluster || true
-    - name: delete-launch-templates
+    # - uses: ./.github/actions/e2e/install-eksctl
+    #   with:
+    #     eksctl_version: v0.147.0
+    - name: delete-security-group
       shell: bash
-      run: |
-        aws ec2 describe-launch-templates \
-          --filter Name=tag:karpenter.k8s.aws/cluster,Values=${{ inputs.cluster_name }} \
-          --query "LaunchTemplates[*].LaunchTemplateId" \
-          --output text |
-          xargs \
-            -n 1 \
-            -r \
-            aws ec2 delete-launch-template \
-            --launch-template-id
\ No newline at end of file
+      run: |
+        # Find the drift suite's security group by name and by the
+        # karpenter.sh/discovery tag, then delete whatever comes back
+        aws ec2 describe-security-groups \
+          --filters Name=group-name,Values=security-group-drift Name=tag:karpenter.sh/discovery,Values=${{ inputs.cluster_name }} \
+          --query "SecurityGroups[*].{ID:GroupId}" \
+          --output text |
+          xargs \
+            -n 1 \
+            -r \
+            aws ec2 delete-security-group \
+            --group-id
+    # - name: delete-cluster
+    #   shell: bash
+    #   run: |
+    #     eksctl delete cluster --name ${{ inputs.cluster_name }} --timeout 60m --wait || true
+    # - name: delete-iam-policies-stack
+    #   shell: bash
+    #   run: |
+    #     aws cloudformation delete-stack --stack-name iam-${{ inputs.cluster_name }}
+    #     aws cloudformation wait stack-delete-complete --stack-name iam-${{ inputs.cluster_name }}
+    # - name: delete-cluster-stack
+    #   shell: bash
+    #   run: |
+    #     aws cloudformation delete-stack --stack-name eksctl-${{ inputs.cluster_name }}-cluster || true
+    #     aws cloudformation wait stack-delete-complete --stack-name eksctl-${{ inputs.cluster_name }}-cluster || true
+    # - name: delete-launch-templates
+    #   shell: bash
+    #   run: |
+    #     aws ec2 describe-launch-templates \
+    #       --filter Name=tag:karpenter.k8s.aws/cluster,Values=${{ inputs.cluster_name }} \
+    #       --query "LaunchTemplates[*].LaunchTemplateId" \
+    #       --output text |
+    #       xargs \
+    #         -n 1 \
+    #         -r \
+    #         aws ec2 delete-launch-template \
+    #         --launch-template-id
+
+    # aws ec2 describe-security-groups \
+    #   --filters Name=tag:karpenter.sh/discovery,Values=${{ inputs.cluster_name }} \
+    #   --query "SecurityGroups[*].{ID:GroupId}" \
+    #   --output text
\ No newline at end of file
diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index de85f30407f5..4e1ef77ed599 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -80,63 +80,63 @@ jobs:
           role-to-assume: arn:aws:iam::${{ vars.ACCOUNT_ID }}:role/${{ vars.ROLE_NAME }}
           aws-region: ${{ inputs.region }}
           role-duration-seconds: 21600
-      - name: add jitter on cluster creation
-        run: |
-          # Creating jitter so that we can stagger cluster creation to avoid throttling
-          sleep $(( $RANDOM % 60 + 1 ))
-      - name: generate cluster name
-        run: |
-          CLUSTER_NAME=$(echo ${{ inputs.suite }}-$RANDOM$RANDOM | awk '{print tolower($0)}')
-          echo "Using cluster name \"$CLUSTER_NAME\""
-          echo CLUSTER_NAME=$CLUSTER_NAME >> $GITHUB_ENV
-      - name: create eks cluster '${{ env.CLUSTER_NAME }}'
-        uses: ./.github/actions/e2e/create-cluster
-        with:
-          account_id: ${{ vars.ACCOUNT_ID }}
-          role: ${{ vars.ROLE_NAME }}
-          region: ${{ inputs.region }}
-          cluster_name: ${{ env.CLUSTER_NAME }}
-          k8s_version: ${{ inputs.k8s_version }}
-          ip_family: ${{ inputs.suite == 'IPv6' && 'IPv6' || 'IPv4' }} # Set the value to IPv6 if IPv6 suite, else IPv4
-          git_ref: ${{ inputs.git_ref }}
-      - name: install prometheus
-        uses: ./.github/actions/e2e/install-prometheus
-        with:
-          account_id: ${{ vars.ACCOUNT_ID }}
-          role: ${{ vars.ROLE_NAME }}
-          region: ${{ vars.PROMETHEUS_REGION }}
-          cluster_name: ${{ env.CLUSTER_NAME }}
-          workspace_id: ${{ vars.WORKSPACE_ID }}
-          git_ref: ${{ inputs.git_ref }}
-      - name: install karpenter
-        uses: ./.github/actions/e2e/install-karpenter
-        with:
-          account_id: ${{ vars.ACCOUNT_ID }}
-          role: ${{ vars.ROLE_NAME }}
-          region: ${{ inputs.region }}
-          cluster_name: ${{ env.CLUSTER_NAME }}
-          git_ref: ${{ inputs.git_ref }}
-      - name: run the ${{ inputs.suite }} test suite
-        run: |
-          aws eks update-kubeconfig --name ${{ env.CLUSTER_NAME }}
-          TEST_SUITE="${{ inputs.suite }}" ENABLE_METRICS=${{ inputs.enable_metrics }} METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" make e2etests
-      - name: notify slack of success or failure
-        uses: ./.github/actions/e2e/slack/notify
-        if: (success() || failure()) && inputs.event_name != 'workflow_run' && inputs.event_name != 'conformance'
-        with:
-          url: ${{ secrets.SLACK_WEBHOOK_URL }}
-          suite: ${{ inputs.suite }}
-          k8s_version: ${{ inputs.k8s_version }}
-          event_name: ${{ inputs.event_name }}
-          git_ref: ${{ inputs.git_ref }}
-      - name: dump logs on failure
-        uses: ./.github/actions/e2e/dump-logs
-        if: failure() || cancelled()
-        with:
-          account_id: ${{ vars.ACCOUNT_ID }}
-          role: ${{ vars.ROLE_NAME }}
-          region: ${{ inputs.region }}
-          cluster_name: ${{ env.CLUSTER_NAME }}
+      # - name: add jitter on cluster creation
+      #   run: |
+      #     # Creating jitter so that we can stagger cluster creation to avoid throttling
+      #     sleep $(( $RANDOM % 60 + 1 ))
+      # - name: generate cluster name
+      #   run: |
+      #     CLUSTER_NAME=$(echo ${{ inputs.suite }}-$RANDOM$RANDOM | awk '{print tolower($0)}')
+      #     echo "Using cluster name \"$CLUSTER_NAME\""
+      #     echo CLUSTER_NAME=$CLUSTER_NAME >> $GITHUB_ENV
+      # - name: create eks cluster '${{ env.CLUSTER_NAME }}'
+      #   uses: ./.github/actions/e2e/create-cluster
+      #   with:
+      #     account_id: ${{ vars.ACCOUNT_ID }}
+      #     role: ${{ vars.ROLE_NAME }}
+      #     region: ${{ inputs.region }}
+      #     cluster_name: ${{ env.CLUSTER_NAME }}
+      #     k8s_version: ${{ inputs.k8s_version }}
+      #     ip_family: ${{ inputs.suite == 'IPv6' && 'IPv6' || 'IPv4' }} # Set the value to IPv6 if IPv6 suite, else IPv4
+      #     git_ref: ${{ inputs.git_ref }}
+      # - name: install prometheus
+      #   uses: ./.github/actions/e2e/install-prometheus
+      #   with:
+      #     account_id: ${{ vars.ACCOUNT_ID }}
+      #     role: ${{ vars.ROLE_NAME }}
+      #     region: ${{ vars.PROMETHEUS_REGION }}
+      #     cluster_name: ${{ env.CLUSTER_NAME }}
+      #     workspace_id: ${{ vars.WORKSPACE_ID }}
+      #     git_ref: ${{ inputs.git_ref }}
+      # - name: install karpenter
+      #   uses: ./.github/actions/e2e/install-karpenter
+      #   with:
+      #     account_id: ${{ vars.ACCOUNT_ID }}
+      #     role: ${{ vars.ROLE_NAME }}
+      #     region: ${{ inputs.region }}
+      #     cluster_name: ${{ env.CLUSTER_NAME }}
+      #     git_ref: ${{ inputs.git_ref }}
+      # - name: run the ${{ inputs.suite }} test suite
+      #   run: |
+      #     aws eks update-kubeconfig --name ${{ env.CLUSTER_NAME }}
+      #     FOCUS="should deprovision nodes that have drifted due to securitygroup" ENABLE_METRICS=${{ inputs.enable_metrics }} METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" make e2etests
+      # - name: notify slack of success or failure
+      #   uses: ./.github/actions/e2e/slack/notify
+      #   if: (success() || failure()) && inputs.event_name != 'workflow_run' && inputs.event_name != 'conformance'
+      #   with:
+      #     url: ${{ secrets.SLACK_WEBHOOK_URL }}
+      #     suite: ${{ inputs.suite }}
+      #     k8s_version: ${{ inputs.k8s_version }}
+      #     event_name: ${{ inputs.event_name }}
+      #     git_ref: ${{ inputs.git_ref }}
+      # - name: dump logs on failure
+      #   uses: ./.github/actions/e2e/dump-logs
+      #   if: failure() || cancelled()
+      #   with:
+      #     account_id: ${{ vars.ACCOUNT_ID }}
+      #     role: ${{ vars.ROLE_NAME }}
+      #     region: ${{ inputs.region }}
+      #     cluster_name: ${{ env.CLUSTER_NAME }}
       - name: cleanup karpenter and cluster '${{ env.CLUSTER_NAME }}' resources
         uses: ./.github/actions/e2e/cleanup
         if: always()
@@ -144,10 +144,10 @@
           account_id: ${{ vars.ACCOUNT_ID }}
           role: ${{ vars.ROLE_NAME }}
           region: ${{ inputs.region }}
-          cluster_name: ${{ env.CLUSTER_NAME }}
+          cluster_name: aengeda-karpenter-playground-us-west-2
           git_ref: ${{ inputs.git_ref }}
-      - if: always() && inputs.event_name == 'workflow_run'
-        uses: ./.github/actions/commit-status/end
-        with:
-          name: "${{ github.workflow }} / e2e (${{ inputs.suite }}) / ${{ github.job }} (snapshot)"
-          git_ref: ${{ inputs.git_ref }}
\ No newline at end of file
+      # - if: always() && inputs.event_name == 'workflow_run'
+      #   uses: ./.github/actions/commit-status/end
+      #   with:
+      #     name: "${{ github.workflow }} / e2e (${{ inputs.suite }}) / ${{ github.job }} (snapshot)"
+      #     git_ref: ${{ inputs.git_ref }}
\ No newline at end of file
diff --git a/.github/workflows/sweeper.yaml b/.github/workflows/sweeper.yaml
index 0ad247a5ccbd..24745257198b 100644
--- a/.github/workflows/sweeper.yaml
+++ b/.github/workflows/sweeper.yaml
@@ -9,6 +9,10 @@ permissions:
 jobs:
   sweeper:
     if: github.repository == 'aws/karpenter' || github.event_name == 'workflow_dispatch'
+    strategy:
+      fail-fast: false
+      matrix:
+        region: [us-east-2, us-west-2, eu-west-1]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
@@ -16,7 +20,7 @@
         uses: aws-actions/configure-aws-credentials@v2
         with:
           role-to-assume: arn:aws:iam::${{ vars.ACCOUNT_ID }}:role/${{ vars.ROLE_NAME }}
-          aws-region: ${{ vars.AWS_REGION }}
+          aws-region: ${{ matrix.region }}
       - uses: actions/setup-go@v4
         with:
           go-version-file: test/hack/cleanup/go.mod
diff --git a/test/hack/cleanup/go.mod b/test/hack/cleanup/go.mod
index 6913122a71d0..10e2a3b0e703 100644
--- a/test/hack/cleanup/go.mod
+++ b/test/hack/cleanup/go.mod
@@ -7,6 +7,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/cloudformation v1.30.0
 	github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.102.0
+	github.com/aws/aws-sdk-go-v2/service/iam v1.21.0
	github.com/samber/lo v1.38.1
	go.uber.org/zap v1.24.0
)
diff --git a/test/hack/cleanup/go.sum b/test/hack/cleanup/go.sum
index 838f7547d5f3..c75c22fa2e42 100644
--- a/test/hack/cleanup/go.sum
+++ b/test/hack/cleanup/go.sum
@@ -18,6 +18,8 @@ github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2 h1:PWGu2JhCb/XJlJ7SSFJq7
 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2/go.mod h1:2KOZkkzMDZCo/aLzPhys06mHNkiU74u85aMJA3PLRvg=
 github.com/aws/aws-sdk-go-v2/service/ec2 v1.102.0 h1:P4dyjm49F2kKws0FpouBC6fjVImACXKt752+CWa01lM=
 github.com/aws/aws-sdk-go-v2/service/ec2 v1.102.0/go.mod h1:tIctCeX9IbzsUTKHt53SVEcgyfxV2ElxJeEB+QUbc4M=
+github.com/aws/aws-sdk-go-v2/service/iam v1.21.0 h1:8hEpu60CWlrp7iEBUFRZhgPoX6+gadaGL1sD4LoRYS0=
+github.com/aws/aws-sdk-go-v2/service/iam v1.21.0/go.mod h1:aQZ8BI+reeaY7RI/QQp7TKCSUHOesTdrzzylp3CW85c=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU=
 github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY=
diff --git a/test/hack/cleanup/main.go b/test/hack/cleanup/main.go
index 7890e34dda72..a0597b4ab798 100644
--- a/test/hack/cleanup/main.go
+++ b/test/hack/cleanup/main.go
@@ -16,6 +16,8 @@ package main

 import (
 	"context"
+	"fmt"
+	"strings"
 	"time"

 	"github.com/aws/aws-sdk-go-v2/config"
@@ -25,16 +27,19 @@
 	cloudwatchtypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	"github.com/aws/aws-sdk-go-v2/service/ec2"
 	ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/aws/aws-sdk-go-v2/service/iam"
 	"github.com/samber/lo"
 	"go.uber.org/zap"
 )

 const (
-	expirationTTL = time.Hour * 12
+	expirationTTL = time.Minute * 2

 	karpenterMetricNamespace = "testing.karpenter.sh/cleanup"

 	karpenterProvisionerNameTag = "karpenter.sh/provisioner-name"
 	karpenterLaunchTemplateTag  = "karpenter.k8s.aws/cluster"
+	karpenterSecurityGroupTag   = "karpenter.sh/discovery"
 	githubRunURLTag             = "github.com/run-url"
 )
@@ -51,9 +56,18 @@ func main() {
 	ec2Client := ec2.NewFromConfig(cfg)
 	cloudFormationClient := cloudformation.NewFromConfig(cfg)
 	cloudWatchClient := cloudwatch.NewFromConfig(cfg)
+	iamClient := iam.NewFromConfig(cfg)

-	// Terminate any old instances that were provisioned by Karpenter as part of testing
-	// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+	cleanupInstances(ctx, ec2Client, cloudWatchClient, expirationTime, logger)
+	cleanupSecurityGroup(ctx, ec2Client, cloudWatchClient, expirationTime, logger)
+	cleanupStack(ctx, cloudWatchClient, cloudFormationClient, expirationTime, logger)
+	cleanupLaunchTemplates(ctx, ec2Client, cloudWatchClient, expirationTime, logger)
+	cleanupOIDCProvider(ctx, iamClient, cloudWatchClient, expirationTime, logger)
+}
+
+// Terminate any old instances that were provisioned by Karpenter as part of testing
+// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+func cleanupInstances(ctx context.Context, ec2Client *ec2.Client, cloudWatchClient *cloudwatch.Client, expirationTime time.Time, logger *zap.SugaredLogger) {
 	ids := getOldInstances(ctx, ec2Client, expirationTime)
 	logger.With("ids", ids, "count", len(ids)).Infof("discovered test instances to delete")
 	if len(ids) > 0 {
@@ -68,9 +82,30 @@
 			}
 		}
 	}
+}
+
+func cleanupSecurityGroup(ctx context.Context, ec2Client *ec2.Client, cloudWatchClient *cloudwatch.Client, expirationTime time.Time, logger *zap.SugaredLogger) {
+	ids := getOldSecurityGroups(ctx, ec2Client, expirationTime)
+	logger.With("ids", ids, "count", len(ids)).Infof("discovered test security groups to delete")
+	deleted := 0
+	for _, id := range ids {
+		if _, err := ec2Client.DeleteSecurityGroup(ctx, &ec2.DeleteSecurityGroupInput{
+			GroupId: lo.ToPtr(id),
+		}); err != nil {
+			logger.With("id", id).Errorf("deleting test security group, %v", err)
+		} else {
+			logger.With("id", id).Infof("deleted test security group")
+			deleted++
+		}
+	}
+	if err := fireMetric(ctx, cloudWatchClient, "SecurityGroupDeleted", float64(deleted)); err != nil {
+		logger.With("name", "SecurityGroupDeleted").Errorf("firing metric, %v", err)
+	}
+}

-	// Terminate any old stacks that were provisioned as part of testing
-	// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+// Terminate any old stacks that were provisioned as part of testing
+// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+func cleanupStack(ctx context.Context, cloudWatchClient *cloudwatch.Client, cloudFormationClient *cloudformation.Client, expirationTime time.Time, logger *zap.SugaredLogger) {
 	names := getOldStacks(ctx, cloudFormationClient, expirationTime)
 	logger.With("names", names, "count", len(names)).Infof("discovered test stacks to delete")
 	deleted := 0
@@ -87,11 +122,14 @@
 	if err := fireMetric(ctx, cloudWatchClient, "StacksDeleted", float64(deleted)); err != nil {
 		logger.With("name", "StacksDeleted").Errorf("firing metric, %v", err)
 	}
+}

-	// Terminate any old launch templates that were managed by Karpenter and were provisioned as part of testing
-	names = getOldLaunchTemplates(ctx, ec2Client, expirationTime)
+// Terminate any old launch templates that were managed by Karpenter and were provisioned as part of testing
+// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+func cleanupLaunchTemplates(ctx context.Context, ec2Client *ec2.Client, cloudWatchClient *cloudwatch.Client, expirationTime time.Time, logger *zap.SugaredLogger) {
+	names := getOldLaunchTemplates(ctx, ec2Client, expirationTime)
 	logger.With("names", names, "count", len(names)).Infof("discovered test launch templates to delete")
-	deleted = 0
+	deleted := 0
 	for i := range names {
 		if _, err := ec2Client.DeleteLaunchTemplate(ctx, &ec2.DeleteLaunchTemplateInput{
 			LaunchTemplateName: lo.ToPtr(names[i]),
@@ -107,6 +145,26 @@
 	}
 }

+// Terminate any old OIDC providers that are left over from testing
+// We execute these in serial since we will most likely get rate limited if we try to delete these too aggressively
+func cleanupOIDCProvider(ctx context.Context, iamClient *iam.Client, cloudWatchClient *cloudwatch.Client, expirationTime time.Time, logger *zap.SugaredLogger) {
+	arns := getOldOIDCProviders(ctx, iamClient, expirationTime)
+	logger.With("arns", arns, "count", len(arns)).Infof("discovered test oidc providers to delete")
+	deleted := 0
+	for i := range arns {
+		if _, err := iamClient.DeleteOpenIDConnectProvider(ctx, &iam.DeleteOpenIDConnectProviderInput{
+			OpenIDConnectProviderArn: lo.ToPtr(arns[i]),
+		}); err != nil {
+			logger.With("arn", arns[i]).Errorf("deleting test cluster oidc provider, %v", err)
+		} else {
+			logger.With("arn", arns[i]).Infof("deleted test cluster oidc provider")
+			deleted++
+		}
+	}
+	if err := fireMetric(ctx, cloudWatchClient, "OIDCDeleted", float64(deleted)); err != nil {
+		logger.With("name", "OIDCDeleted").Errorf("firing metric, %v", err)
+	}
+}
+
 func fireMetric(ctx context.Context, cloudWatchClient *cloudwatch.Client, name string, value float64) error {
 	_, err := cloudWatchClient.PutMetricData(ctx, &cloudwatch.PutMetricDataInput{
 		Namespace: lo.ToPtr(karpenterMetricNamespace),
@@ -155,6 +213,43 @@
 	return ids
 }

+func getOldSecurityGroups(ctx context.Context, ec2Client *ec2.Client, expirationTime time.Time) (ids []string) {
+	var nextToken *string
+	for {
+		out := lo.Must(ec2Client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{
+			Filters: []ec2types.Filter{
+				{
+					Name:   lo.ToPtr("group-name"),
+					Values: []string{"security-group-drift"},
+				},
+			},
+			NextToken: nextToken,
+		}))
+
+		for _, sg := range out.SecurityGroups {
+			creationDate, found := lo.Find(sg.Tags, func(tag ec2types.Tag) bool {
+				return *tag.Key == "creation-date"
+			})
+			if !found {
+				continue
+			}
+			creationTime, err := time.Parse(time.RFC3339, *creationDate.Value)
+			if err != nil {
+				continue
+			}
+			if creationTime.Before(expirationTime) {
+				ids = append(ids, lo.FromPtr(sg.GroupId))
+			}
+		}
+
+		nextToken = out.NextToken
+		if nextToken == nil {
+			break
+		}
+	}
+	return ids
+}
+
 func getOldStacks(ctx context.Context, cloudFormationClient *cloudformation.Client, expirationTime time.Time) (names []string) {
 	var nextToken *string
 	for {
@@ -208,3 +303,23 @@
 	}
 	return names
 }
+
+func getOldOIDCProviders(ctx context.Context, iamClient *iam.Client, expirationTime time.Time) (arns []string) {
+	testSuite := []string{"upgrade", "chaos", "consolidation", "drift", "integration", "interruption", "ipv6", "machine", "scale", "utilization"}
+	out := lo.Must(iamClient.ListOpenIDConnectProviders(ctx, &iam.ListOpenIDConnectProvidersInput{}))
+
+	for _, oidcProvider := range out.OpenIDConnectProviderList {
+		oidc := lo.Must(iamClient.GetOpenIDConnectProvider(ctx, &iam.GetOpenIDConnectProviderInput{
+			OpenIDConnectProviderArn: oidcProvider.Arn,
+		}))
+
+		for _, t := range oidc.Tags {
+			if lo.FromPtr(t.Key) == "alpha.eksctl.io/cluster-name" &&
+				lo.SomeBy(testSuite, func(s string) bool { return strings.HasPrefix(lo.FromPtr(t.Value), fmt.Sprintf("%s-", s)) }) &&
+				oidc.CreateDate.Before(expirationTime) {
+				arns = append(arns, lo.FromPtr(oidcProvider.Arn))
+			}
+		}
+	}
+	return arns
+}
diff --git a/test/suites/drift/suite_test.go b/test/suites/drift/suite_test.go
index 7c86d3adb1c5..fc0c3e6a2af9 100644
--- a/test/suites/drift/suite_test.go
+++ b/test/suites/drift/suite_test.go
@@ -171,6 +171,10 @@ var _ = Describe("Drift", Label("AWS"), func() {
 					Key:   awssdk.String("karpenter.sh/discovery"),
 					Value: awssdk.String(settings.FromContext(env.Context).ClusterName),
 				},
+				{
+					Key:   awssdk.String("creation-date"),
+					Value: awssdk.String(time.Now().Format(time.RFC3339)),
+				},
 			},
 		},
 	},
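
The security-group half of this fix hinges on the creation-date tag: the drift suite stamps it in RFC3339 form when it creates security-group-drift, and getOldSecurityGroups in the sweeper parses it back to decide what is old enough to delete. A minimal, self-contained sketch of that expiry rule (the isExpired helper name is illustrative only, not part of the patch):

package main

import (
	"fmt"
	"time"
)

// The sweeper computes a cutoff of time.Now() minus a TTL; 12 hours is the
// value the tool used before this patch's (debug) shortening to 2 minutes.
const expirationTTL = 12 * time.Hour

// isExpired mirrors the check in getOldSecurityGroups: parse the
// creation-date tag as RFC3339 and flag anything created before the cutoff.
// Unparseable values return false, so malformed tags are skipped, not swept.
func isExpired(creationDate string, expirationTime time.Time) bool {
	created, err := time.Parse(time.RFC3339, creationDate)
	if err != nil {
		return false
	}
	return created.Before(expirationTime)
}

func main() {
	cutoff := time.Now().Add(-expirationTTL)
	fmt.Println(isExpired(time.Now().Add(-24*time.Hour).Format(time.RFC3339), cutoff)) // true: older than the TTL
	fmt.Println(isExpired(time.Now().Format(time.RFC3339), cutoff))                    // false: still fresh
	fmt.Println(isExpired("not-a-timestamp", cutoff))                                  // false: skipped

Skipping unparseable tags errs on the side of leaking rather than deleting a resource the sweeper cannot date, which is the safer failure mode for a shared test account.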