Commit 4d1628e

Merge pull request #89 from engedaam/sock_testing

Sock testing

engedaam authored Aug 3, 2023
2 parents be60e26 + 15b07a1

Showing 9 changed files with 350 additions and 24 deletions.
4 changes: 2 additions & 2 deletions .github/actions/e2e/install-karpenter/action.yaml
@@ -39,7 +39,7 @@ runs:
        aws eks update-kubeconfig --name "${{ inputs.cluster_name }}"
        helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
          -n karpenter \
-         --version "v0-$(git rev-parse HEAD)" \
+         --version v0.29.2 \
          --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${{ inputs.account_id }}:role/karpenter-irsa-${{ inputs.cluster_name }}" \
          --set settings.aws.clusterName="${{ inputs.cluster_name }}" \
          --set settings.aws.defaultInstanceProfile="KarpenterNodeInstanceProfile-${{ inputs.cluster_name }}" \
@@ -64,5 +64,5 @@ runs:
      run: |
        helm diff upgrade --namespace karpenter \
          karpenter oci://public.ecr.aws/karpenter/karpenter \
-         --version v0-$(git rev-parse HEAD) \
+         --version v0.29.2 \
          --reuse-values --three-way-merge --detailed-exitcode
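
Reviewer note: both helm invocations now pin the chart to v0.29.2 instead of the per-commit snapshot tag v0-$(git rev-parse HEAD). A quick post-install check that the pinned version is what actually landed might look like this (a sketch; it assumes helm and jq are on the PATH and helm's usual name-version chart string):

    helm list -n karpenter -o json | jq -r '.[] | select(.name == "karpenter") | .chart'
    # expected output: karpenter-v0.29.2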
33 changes: 11 additions & 22 deletions .github/actions/e2e/install-prometheus/values.yaml
@@ -26,30 +26,19 @@ alertmanager:
  tolerations:
    - key: CriticalAddonsOnly
      operator: Exists
-kubelet:
-  serviceMonitor:
-    additionalLabels:
-      scrape: enabled
prometheus:
  prometheusSpec:
    tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
-    resources:
-      requests:
-        cpu: 1
-        memory: 5Gi
-      limits:
-        cpu: 1
-        memory: 5Gi
-    serviceMonitorSelector:
-      matchLabels:
-        scrape: enabled
-    serviceMonitorNamespaceSelector:
-      matchLabels:
-        scrape: enabled
-    remoteWrite:
-      - queueConfig:
-          maxSamplesPerSend: 1000
-          maxShards: 200
-          capacity: 2500
+extraScrapeConfigs: |
+  - job_name: karpenter
+    kubernetes_sd_configs:
+      - role: endpoints
+        namespaces:
+          names:
+            - karpenter
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_endpoint_port_name]
+        regex: http-metrics
+        action: keep
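
Reviewer note: this values change swaps ServiceMonitor-based discovery (the kubelet block and the scrape: enabled selectors) for one static scrape job: Prometheus discovers endpoints in the karpenter namespace, and the relabel rule keeps only endpoints whose port is named http-metrics. A sketch of validating the rendered job locally with promtool (the wrapper file and its path are assumptions):

    cat > /tmp/karpenter-scrape.yml <<'EOF'
    scrape_configs:
      - job_name: karpenter
        kubernetes_sd_configs:
          - role: endpoints
            namespaces:
              names:
                - karpenter
        relabel_configs:
          - source_labels: [__meta_kubernetes_endpoint_port_name]
            regex: http-metrics
            action: keep
    EOF
    promtool check config /tmp/karpenter-scrape.yml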
13 changes: 13 additions & 0 deletions .github/workflows/e2e-soak-trigger.yaml
@@ -0,0 +1,13 @@
name: E2ESoakTrigger
on:
  schedule:
    - cron: '0 */1 * * *'
  workflow_dispatch:
jobs:
  soak:
    # if: github.repository == 'aws/karpenter' || github.event_name == 'workflow_dispatch'
    uses: ./.github/workflows/e2e-soak.yaml
    with:
      event_name: ${{ github.event_name }}
    secrets:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
132 changes: 132 additions & 0 deletions .github/workflows/e2e-soak.yaml
@@ -0,0 +1,132 @@
name: E2ESoak
on:
  workflow_dispatch:
    inputs:
      git_ref:
        type: string
      region:
        type: choice
        options:
          - "us-east-2"
          - "us-west-2"
        default: "us-east-2"
      k8s_version:
        type: choice
        options:
          - "1.23"
          - "1.24"
          - "1.25"
          - "1.26"
          - "1.27"
        default: "1.27"
      enable_metrics:
        type: boolean
        default: false
  workflow_call:
    inputs:
      git_ref:
        type: string
      region:
        type: string
        default: "us-east-2"
      event_name:
        type: string
        required: true
      k8s_version:
        type: string
        default: "1.27"
      enable_metrics:
        type: boolean
        default: false
    secrets:
      SLACK_WEBHOOK_URL:
        required: true
permissions:
  id-token: write # This is required for requesting the JWT
  contents: read # This is required for actions/checkout
  statuses: write
jobs:
  run-suite:
    name: suite-Soak
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ inputs.git_ref }}
      - name: configure aws credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: arn:aws:iam::${{ vars.ACCOUNT_ID }}:role/${{ vars.ROLE_NAME }}
          aws-region: ${{ inputs.region }}
          role-duration-seconds: 21600
      - uses: ./.github/actions/e2e/install-eksctl
        with:
          eksctl_version: v0.147.0
      - name: find preexisting cluster
        run: |
          export PREEXISTING=$(eksctl get cluster -o json | jq '.[].Name' | grep soak)
          echo "Found existing cluster name \"$PREEXISTING\""
          echo PREEXISTING=$PREEXISTING >> $GITHUB_ENV
      - name: generate cluster name
        if: env.PREEXISTING == ''
        run: |
          CLUSTER_NAME=$(echo Soak-$RANDOM$RANDOM | awk '{print tolower($0)}')
          echo "Using cluster name \"$CLUSTER_NAME\""
          echo CLUSTER_NAME=$CLUSTER_NAME >> $GITHUB_ENV
      - name: create eks cluster '${{ env.CLUSTER_NAME }}'
        if: env.PREEXISTING == ''
        uses: ./.github/actions/e2e/create-cluster
        with:
          account_id: ${{ vars.ACCOUNT_ID }}
          role: ${{ vars.ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ env.CLUSTER_NAME }}
          k8s_version: ${{ inputs.k8s_version }}
          ip_family: 'IPv4'
          git_ref: ${{ inputs.git_ref }}
      - name: install prometheus
        if: env.PREEXISTING == ''
        uses: ./.github/actions/e2e/install-prometheus
        with:
          account_id: ${{ vars.ACCOUNT_ID }}
          role: ${{ vars.ROLE_NAME }}
          region: ${{ vars.PROMETHEUS_REGION }}
          cluster_name: ${{ env.CLUSTER_NAME }}
          workspace_id: ${{ vars.WORKSPACE_ID }}
          git_ref: ${{ inputs.git_ref }}
      - name: install karpenter
        if: env.PREEXISTING == ''
        uses: ./.github/actions/e2e/install-karpenter
        with:
          account_id: ${{ vars.ACCOUNT_ID }}
          role: ${{ vars.ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ env.CLUSTER_NAME }}
          git_ref: ${{ inputs.git_ref }}
      - name: run the Soak test suite on the preexisting cluster
        if: env.PREEXISTING != ''
        run: |
          aws eks update-kubeconfig --name ${{ env.PREEXISTING }}
          TEST_SUITE="Soak" ENABLE_METRICS=${{ inputs.enable_metrics }} METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" make e2etests
      - name: run the Soak test suite on the new cluster
        if: env.PREEXISTING == ''
        run: |
          aws eks update-kubeconfig --name ${{ env.CLUSTER_NAME }}
          TEST_SUITE="Soak" ENABLE_METRICS=${{ inputs.enable_metrics }} METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" make e2etests
      - name: notify slack of success or failure
        uses: ./.github/actions/e2e/slack/notify
        if: (success() || failure()) && inputs.event_name != 'workflow_run' && inputs.event_name != 'conformance'
        with:
          url: ${{ secrets.SLACK_WEBHOOK_URL }}
          suite: Soak
          k8s_version: ${{ inputs.k8s_version }}
          event_name: ${{ inputs.event_name }}
          git_ref: ${{ inputs.git_ref }}
      - name: dump logs on failure
        uses: ./.github/actions/e2e/dump-logs
        if: failure() || cancelled()
        with:
          account_id: ${{ vars.ACCOUNT_ID }}
          role: ${{ vars.ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ env.CLUSTER_NAME }}
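
Reviewer note: the reuse-or-create flow hinges on the "find preexisting cluster" step, which is also why eks:ListClusters is added to the IAM policy below; every provisioning step is gated on env.PREEXISTING being empty. One caveat: jq without -r prints JSON-quoted names, so the value written to $GITHUB_ENV carries literal quotes (the shell happens to strip them again in the later update-kubeconfig call, but raw output is cleaner). A sketch of the detection with raw output; the || true guard is an assumption to keep the step green under bash -e when nothing matches:

    PREEXISTING=$(eksctl get cluster -o json | jq -r '.[].Name' | grep soak || true)
    echo "PREEXISTING=$PREEXISTING" >> "$GITHUB_ENV"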
2 changes: 2 additions & 0 deletions charts/karpenter/templates/deployment.yaml
@@ -66,6 +66,8 @@ spec:
          image: {{ include "karpenter.controller.image" . }}
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          env:
+            - name: ENABLE_PROFILING
+              value: "true"
            - name: KUBERNETES_MIN_VERSION
              value: "1.19.0-0"
            - name: KARPENTER_SERVICE
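
Reviewer note: this sets ENABLE_PROFILING unconditionally for every install of the chart, not just soak runs. Assuming the controller serves Go pprof endpoints on its metrics port once profiling is enabled (port 8000 and the /debug/pprof path are assumptions, not confirmed by this diff), a heap profile could be pulled during a soak run like so:

    kubectl -n karpenter port-forward deploy/karpenter 8000 &
    go tool pprof http://localhost:8000/debug/pprof/heap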
1 change: 1 addition & 0 deletions test/cloudformation/iam_cloudformation.yaml
@@ -134,6 +134,7 @@ Resources:
              Resource: "*"
            - Effect: Allow
              Action:
+                - eks:ListClusters
                - eks:CreateCluster
                - eks:CreateAddon
                - eks:CreateNodegroup
1 change: 1 addition & 0 deletions test/pkg/environment/common/setup.go
@@ -76,6 +76,7 @@ func (env *Environment) ExpectCleanCluster() {
    var nodes v1.NodeList
    Expect(env.Client.List(env.Context, &nodes)).To(Succeed())
    for _, node := range nodes.Items {
+        fmt.Println(node.Name)
        if len(node.Spec.Taints) == 0 && !node.Spec.Unschedulable {
            Fail(fmt.Sprintf("expected system pool node %s to be tainted", node.Name))
        }
160 changes: 160 additions & 0 deletions test/suites/soak/suite_test.go
@@ -0,0 +1,160 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package soak_test

import (
    "context"
    "fmt"
    "math/rand"
    "sync/atomic"
    "testing"
    "time"

    "github.com/aws/karpenter-core/pkg/apis/v1alpha5"
    "github.com/aws/karpenter-core/pkg/test"
    nodeutils "github.com/aws/karpenter-core/pkg/utils/node"
    "github.com/aws/karpenter/pkg/apis/settings"
    "github.com/aws/karpenter/pkg/apis/v1alpha1"
    awstest "github.com/aws/karpenter/pkg/test"
    "github.com/aws/karpenter/test/pkg/debug"
    "github.com/aws/karpenter/test/pkg/environment/aws"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/samber/lo"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/tools/cache"
    "sigs.k8s.io/controller-runtime/pkg/client"

    awssdk "github.com/aws/aws-sdk-go/aws"
)

var env *aws.Environment

func TestSoak(t *testing.T) {
    RegisterFailHandler(Fail)
    BeforeSuite(func() {
        env = aws.NewEnvironment(t)
        SetDefaultEventuallyTimeout(time.Hour)
    })
    RunSpecs(t, "Soak")
}

var _ = BeforeEach(func() { env.BeforeEach() })
var _ = AfterEach(func() { env.Cleanup() })
var _ = AfterEach(func() { env.AfterEach() })

var _ = Describe("Soak", func() {
    It("should ", Label(debug.NoWatch), Label(debug.NoEvents), func() {
        ctx, cancel := context.WithCancel(env.Context)
        defer cancel()

        // content, err := os.ReadFile("testdata/user.sh")
        // Expect(err).NotTo(HaveOccurred())
        provider := awstest.AWSNodeTemplate(v1alpha1.AWSNodeTemplateSpec{AWS: v1alpha1.AWS{
            SecurityGroupSelector: map[string]string{"karpenter.sh/discovery": settings.FromContext(env.Context).ClusterName},
            SubnetSelector:        map[string]string{"karpenter.sh/discovery": settings.FromContext(env.Context).ClusterName},
        },
            // UserData: awssdk.String(string(content)),
        })
        provisioner := test.Provisioner(test.ProvisionerOptions{
            ObjectMeta: metav1.ObjectMeta{
                Name: "sock-test-provisioner",
            },
            Requirements: []v1.NodeSelectorRequirement{
                {
                    Key:      v1alpha5.LabelCapacityType,
                    Operator: v1.NodeSelectorOpIn,
                    Values:   []string{v1alpha5.CapacityTypeSpot},
                },
            },
            Consolidation: &v1alpha5.Consolidation{
                Enabled: lo.ToPtr(true),
            },
            ProviderRef: &v1alpha5.MachineTemplateRef{Name: provider.Name},
        })
        numPods := 0
        dep := test.Deployment(test.DeploymentOptions{
            Replicas: int32(numPods),
            PodOptions: test.PodOptions{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{"app": "my-app"},
                },
                TerminationGracePeriodSeconds: lo.ToPtr[int64](0),
            },
        })

        // Require one pod per node so the replica count tracks the node count.
        dep.Spec.Template.Spec.Affinity = &v1.Affinity{
            PodAntiAffinity: &v1.PodAntiAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                    {
                        LabelSelector: dep.Spec.Selector,
                        TopologyKey:   v1.LabelHostname,
                    },
                },
            },
        }

        // Create the deployment (initially scaled to zero)
        env.ExpectCreated(provider, provisioner, dep)
        startNodeCountMonitor(ctx, env.Client)
        time.Sleep(time.Second * 10)

        Consistently(func(g Gomega) {
            dep.Spec.Replicas = awssdk.Int32(int32(rand.Intn(100) + 1))
            // CI check failure at line 117: G404: Use of weak random number generator (math/rand instead of crypto/rand) (gosec)
            env.ExpectCreatedOrUpdated(dep)
            time.Sleep(time.Minute * 5)
            dep.Spec.Replicas = awssdk.Int32(0)
            env.ExpectCreatedOrUpdated(dep)
            time.Sleep(time.Second * 30)
        }, time.Hour*2).Should(Succeed())
        env.ExpectDeleted(provisioner, provider, dep)
    })
})

func startNodeCountMonitor(ctx context.Context, kubeClient client.Client) {
    createdNodes := atomic.Int64{}
    deletedNodes := atomic.Int64{}

    factory := informers.NewSharedInformerFactoryWithOptions(env.KubeClient, time.Second*30,
        informers.WithTweakListOptions(func(l *metav1.ListOptions) { l.LabelSelector = v1alpha5.ProvisionerNameLabelKey }))
    nodeInformer := factory.Core().V1().Nodes().Informer()
    // CI check failure at line 135: Error return value of `nodeInformer.AddEventHandler` is not checked (errcheck)
    nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(_ interface{}) {
            createdNodes.Add(1)
        },
        DeleteFunc: func(_ interface{}) {
            deletedNodes.Add(1)
        },
    })
    factory.Start(ctx.Done())
    go func() {
        for {
            list := &v1.NodeList{}
            if err := kubeClient.List(ctx, list, client.HasLabels{test.DiscoveryLabel}); err == nil {
                readyCount := lo.CountBy(list.Items, func(n v1.Node) bool {
                    return nodeutils.GetCondition(&n, v1.NodeReady).Status == v1.ConditionTrue
                })
                fmt.Printf("[NODE COUNT] CURRENT: %d | READY: %d | CREATED: %d | DELETED: %d\n", len(list.Items), readyCount, createdNodes.Load(), deletedNodes.Load())
            }
            select {
            case <-ctx.Done():
                return
            case <-time.After(time.Second * 5):
            }
        }
    }()
}
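
Reviewer note: the two CI findings condensed into comments above are lint-level rather than functional. A minimal sketch of possible fixes, assuming gosec's conventional //nosec suppression for non-security randomness and the client-go >= 0.26 AddEventHandler signature that returns a registration handle and an error:

    // G404: the replica count is test jitter, not security-sensitive.
    dep.Spec.Replicas = awssdk.Int32(int32(rand.Intn(100) + 1)) //nosec G404

    // errcheck: surface a handler-registration failure instead of dropping it.
    if _, err := nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    func(_ interface{}) { createdNodes.Add(1) },
        DeleteFunc: func(_ interface{}) { deletedNodes.Add(1) },
    }); err != nil {
        Fail(fmt.Sprintf("registering node informer handler: %v", err))
    }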