diff --git a/.zappr.yaml b/.zappr.yaml
index 691f9c19fc..19681490bb 100644
--- a/.zappr.yaml
+++ b/.zappr.yaml
@@ -11,7 +11,6 @@ approvals:
   minimum: 2
   from:
     users:
-      - gargravarr
       - linki
       - mikkeloscar
       - szuecs
@@ -22,7 +21,6 @@ approvals:
       - demonCoder95
       - RomanZavodskikh
       - MustafaSaber
-      - lucastt
 
 # mandatory pull request labels
 pull-request:
diff --git a/cluster/cluster.yaml b/cluster/cluster.yaml
index 900c0f94a0..b953dbb6b9 100644
--- a/cluster/cluster.yaml
+++ b/cluster/cluster.yaml
@@ -52,6 +52,78 @@ Resources:
           Value: owned
       ToPort: 10250
     Type: 'AWS::EC2::SecurityGroupIngress'
+{{- if or (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "pre") (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "serving") (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "active") }}
+  ControlPlaneInternalLB:
+    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
+    Properties:
+      Name: "{{.Cluster.LocalID}}-nlb-internal"
+      LoadBalancerAttributes:
+        - Key: load_balancing.cross_zone.enabled
+          Value: "true"
+      Scheme: internal
+      Subnets:
+      {{ with $values := .Values }}
+      {{ range $az := $values.availability_zones }}
+        - "{{ index $values.lb_subnets $az }}"
+      {{ end }}
+      {{ end }}
+      Tags:
+        - Key: 'kubernetes.io/cluster/{{.Cluster.ID}}'
+          Value: owned
+        - Key: "component"
+          Value: "kube-apiserver"
+      Type: network
+  ControlPlaneInternalLBTargetGroup:
+    Type: AWS::ElasticLoadBalancingV2::TargetGroup
+    Properties:
+      HealthCheckIntervalSeconds: 10
+      HealthCheckPort: 8443
+      HealthCheckProtocol: HTTPS
+      HealthCheckPath: "/readyz"
+      HealthyThresholdCount: 2
+      UnhealthyThresholdCount: 2
+      Name: "{{.Cluster.LocalID}}-nlb-internal"
+      Port: 8443
+      Protocol: TLS
+      Tags:
+        - Key: 'kubernetes.io/cluster/{{.Cluster.ID}}'
+          Value: owned
+        - Key: "component"
+          Value: "kube-apiserver"
+      VpcId: "{{.Cluster.ConfigItems.vpc_id}}"
+      TargetGroupAttributes:
+        - Key: deregistration_delay.timeout_seconds
+          Value: 60
+        - Key: preserve_client_ip.enabled
+          Value: "false"
+  ControlPlaneInternalLBListener:
+    Type: AWS::ElasticLoadBalancingV2::Listener
+    Properties:
+      AlpnPolicy:
+        - {{ if eq .Cluster.ConfigItems.experimental_nlb_alpn_h2_enabled "true" }}HTTP2Preferred{{else}}None{{end}}
+      SslPolicy: "ELBSecurityPolicy-TLS-1-2-2017-01"
+      Certificates:
+        - CertificateArn: "{{.Values.load_balancer_certificate}}"
+      DefaultActions:
+        - Type: forward
+          TargetGroupArn: !Ref ControlPlaneInternalLBTargetGroup
+      LoadBalancerArn: !Ref ControlPlaneInternalLB
+      Port: 443
+      Protocol: TLS
+  ControlPlaneInternalLBVersionDomain:
+    Properties:
+      AliasTarget:
+        DNSName: !GetAtt
+          - ControlPlaneInternalLB
+          - DNSName
+        HostedZoneId: !GetAtt
+          - ControlPlaneInternalLB
+          - CanonicalHostedZoneID
+      HostedZoneName: "{{.Values.hosted_zone}}."
+      Name: "{{.Cluster.LocalID}}-internal.{{.Values.hosted_zone}}."
+      Type: A
+    Type: 'AWS::Route53::RecordSet'
+{{- end }}
   MasterLoadBalancerNLB:
     Type: AWS::ElasticLoadBalancingV2::LoadBalancer
     Properties:
@@ -2503,6 +2575,12 @@ Outputs:
     Export:
       Name: '{{.Cluster.ID}}:master-load-balancer-nlb-target-group'
     Value: !Ref MasterLoadBalancerNLBTargetGroup
+{{- if or (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "pre") (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "serving") (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "active") }}
+  ControlPlaneInternalLBTargetGroup:
+    Export:
+      Name: '{{.Cluster.ID}}:control-plane-internal-lb-target-group'
+    Value: !Ref ControlPlaneInternalLBTargetGroup
+{{- end }}
   MasterSecurityGroup:
     Export:
       Name: '{{.Cluster.ID}}:master-security-group'
diff --git a/cluster/config-defaults.yaml b/cluster/config-defaults.yaml
index 65aa0a735b..c14797507b 100644
--- a/cluster/config-defaults.yaml
+++ b/cluster/config-defaults.yaml
@@ -674,6 +674,9 @@ teapot_admission_controller_configmap_deletion_protection_enabled: "true"
 teapot_admission_controller_configmap_deletion_protection_factories_enabled: "true"
 {{end}}
 
+# enable the rolebinding admission-controller webhook which validates rolebindings and clusterrolebindings
+teapot_admission_controller_enable_rolebinding_webhook: "true"
+
 # Enable and configure Pod Security Policy rules implemented in admission-controller.
 teapot_admission_controller_pod_security_policy_enabled: "true"
 
@@ -1133,6 +1136,18 @@ control_plane_asg_lifecycle_hook: "false"
 # enable graceful shutdown on the control_plane nodes
 control_plane_graceful_shutdown: "true"
 
+# Optionally enable an internal load balancer for the control plane nodes
+# additionally to the public load balancer.
+#
+# Possible values:
+# none    - Load Balancer is not created
+# pre     - Load Balancer is created but not attached to control plane nodes
+# serving - Load Balancer is created and attached to control plane nodes.
+# active  - Load Balancer is being actively used by worker nodes.
+#
+# For rolling back it needs to be done in multiple stages: active -> serving -> pre -> none
+control_plane_load_balancer_internal: "none"
+
 # This allows setting custom sysctl settings. The config-item is intended to be
 # used on node-pools rather being set globally.
 #
diff --git a/cluster/manifests/01-admission-control/teapot.yaml b/cluster/manifests/01-admission-control/teapot.yaml
index 7ba1ca80dc..315d147034 100644
--- a/cluster/manifests/01-admission-control/teapot.yaml
+++ b/cluster/manifests/01-admission-control/teapot.yaml
@@ -252,3 +252,18 @@ webhooks:
         apiGroups: [""]
         apiVersions: ["v1"]
         resources: ["services"]
+{{- if eq .Cluster.ConfigItems.teapot_admission_controller_enable_rolebinding_webhook "true" }}
+  - name: rolebinding-admitter.teapot.zalan.do
+    clientConfig:
+      url: "https://localhost:8085/rolebinding"
+      caBundle: "{{ .Cluster.ConfigItems.ca_cert_decompressed }}"
+    admissionReviewVersions: ["v1beta1"]
+    failurePolicy: Fail
+    sideEffects: "NoneOnDryRun"
+    matchPolicy: Equivalent
+    rules:
+      - operations: [ "CREATE", "UPDATE" ]
+        apiGroups: ["rbac.authorization.k8s.io"]
+        apiVersions: ["v1"]
+        resources: ["rolebindings", "clusterrolebindings"]
+{{- end }}
diff --git a/cluster/manifests/cluster-lifecycle-controller/deployment.yaml b/cluster/manifests/cluster-lifecycle-controller/deployment.yaml
index a953386f26..56706dfac6 100644
--- a/cluster/manifests/cluster-lifecycle-controller/deployment.yaml
+++ b/cluster/manifests/cluster-lifecycle-controller/deployment.yaml
@@ -35,7 +35,7 @@ spec:
         operator: Exists
       containers:
       - name: cluster-lifecycle-controller
-        image: container-registry.zalando.net/teapot/cluster-lifecycle-controller:master-43
+        image: container-registry.zalando.net/teapot/cluster-lifecycle-controller:master-44
         args:
         - --drain-grace-period={{.Cluster.ConfigItems.drain_grace_period}}
         - --drain-min-pod-lifetime={{.Cluster.ConfigItems.drain_min_pod_lifetime}}
diff --git a/cluster/manifests/e2e-resources/pool-reserve.yaml b/cluster/manifests/e2e-resources/pool-reserve.yaml
index 71b1c86764..cfac85a478 100644
--- a/cluster/manifests/e2e-resources/pool-reserve.yaml
+++ b/cluster/manifests/e2e-resources/pool-reserve.yaml
@@ -46,7 +46,7 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: pause
-        image: container-registry.zalando.net/teapot/pause:3.7-master-21
+        image: container-registry.zalando.net/teapot/pause:3.9-master-23
         resources:
           limits:
             cpu: 1m
diff --git a/cluster/manifests/kube-cluster-autoscaler/buffer-pods-deployment.yaml b/cluster/manifests/kube-cluster-autoscaler/buffer-pods-deployment.yaml
index fa32f1ab1f..0c1c4f5819 100644
--- a/cluster/manifests/kube-cluster-autoscaler/buffer-pods-deployment.yaml
+++ b/cluster/manifests/kube-cluster-autoscaler/buffer-pods-deployment.yaml
@@ -35,7 +35,7 @@ spec:
       terminationGracePeriodSeconds: 0
      containers:
      - name: pause
-        image: container-registry.zalando.net/teapot/pause:3.7-master-21
+        image: container-registry.zalando.net/teapot/pause:3.9-master-23
         resources:
           limits:
             cpu: {{$data.Cluster.ConfigItems.autoscaling_buffer_cpu}}
diff --git a/cluster/node-pools/master-default/stack.yaml b/cluster/node-pools/master-default/stack.yaml
index f333b9ed84..5e44ea9666 100644
--- a/cluster/node-pools/master-default/stack.yaml
+++ b/cluster/node-pools/master-default/stack.yaml
@@ -44,6 +44,9 @@ Resources:
       {{ end }}
       TargetGroupARNs:
       - !ImportValue '{{ .Cluster.ID }}:master-load-balancer-nlb-target-group'
+{{- if or (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "serving") (eq .Cluster.ConfigItems.control_plane_load_balancer_internal "active") }}
+      - !ImportValue '{{ .Cluster.ID }}:control-plane-internal-lb-target-group'
+{{- end}}
     Type: 'AWS::AutoScaling::AutoScalingGroup'
   LaunchTemplate:
     Properties:
diff --git a/cluster/node-pools/master-default/userdata.yaml b/cluster/node-pools/master-default/userdata.yaml
index 7a8bd4d457..f70b8fae7e 100644
--- a/cluster/node-pools/master-default/userdata.yaml
+++ b/cluster/node-pools/master-default/userdata.yaml
@@ -357,7 +357,7 @@ write_files:
         - mountPath: /etc/kubernetes/k8s-authnz-webhook-kubeconfig
           name: k8s-authnz-webhook-kubeconfig
           readOnly: true
-      - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-129
+      - image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-130
         name: tokeninfo
         ports:
         - containerPort: 9021
@@ -388,7 +388,7 @@ write_files:
           value: {{ .Cluster.ConfigItems.apiserver_business_partner_ids }}
 {{ if ne .Cluster.Environment "production" }}
       - name: tokeninfo-sandbox
-        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-129
+        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/foundation/platform-iam-tokeninfo:master-130
         ports:
         - containerPort: 9022
         lifecycle:
@@ -600,7 +600,7 @@ write_files:
       containers:
       - name: kube-controller-manager
 {{- if eq .Cluster.ConfigItems.kubernetes_controller_manager_image "zalando" }}
-        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/kube-controller-manager-internal:v1.31.1-master-131
+        image: 926694233939.dkr.ecr.eu-central-1.amazonaws.com/production_namespace/teapot/kube-controller-manager-internal:v1.31.2-master-132
 {{- else }}
         image: nonexistent.zalan.do/teapot/kube-controller-manager:fixed
 {{- end }}
diff --git a/cluster/node-pools/worker-splitaz/userdata.yaml b/cluster/node-pools/worker-splitaz/userdata.yaml
index 3ebefa352c..b19b6fb1e3 100644
--- a/cluster/node-pools/worker-splitaz/userdata.yaml
+++ b/cluster/node-pools/worker-splitaz/userdata.yaml
@@ -34,7 +34,11 @@ write_files:
     clusters:
     - name: local
       cluster:
+{{- if eq .Cluster.ConfigItems.control_plane_load_balancer_internal "active" }}
+        server: "https://{{.Cluster.LocalID}}-internal.{{.Values.hosted_zone}}"
+{{- else }}
         server: {{ .Cluster.APIServerURL }}
+{{- end }}
     users:
     - name: kubelet
       user: