Skip to content

Commit

Permalink
fix: handle null podCIDR and serviceCIDR
Browse files Browse the repository at this point in the history
fix: handle null podCIDR and serviceCIDR

Signed-off-by: ruochen <[email protected]>
  • Loading branch information
0x0034 committed Dec 9, 2024
1 parent 3728fc7 commit fb36fbe
Show file tree
Hide file tree
Showing 2 changed files with 181 additions and 22 deletions.
63 changes: 41 additions & 22 deletions pkg/coordinatormanager/coordinator_informer.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (
"time"

"github.com/cilium/cilium/pkg/ipam/option"
v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
cilium_externalversions "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions"
ciliumLister "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1"
Expand Down Expand Up @@ -465,15 +465,30 @@ func (cc *CoordinatorController) updatePodAndServerCIDR(ctx context.Context, log

var cm corev1.ConfigMap
var k8sPodCIDR, k8sServiceCIDR []string
if err := cc.APIReader.Get(ctx, types.NamespacedName{Namespace: metav1.NamespaceSystem, Name: "kubeadm-config"}, &cm); err == nil {
logger.Sugar().Infof("Trying to fetch the ClusterCIDR from kube-system/kubeadm-config")
k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKubeadmConfigMap(&cm)
logger.Sugar().Infof("kubeadm-config configMap k8sPodCIDR %v, k8sServiceCIDR %v", k8sPodCIDR, k8sServiceCIDR)
} else {
// try to get ClusterCIDR from kubeadm-config ConfigMap
err = cc.APIReader.Get(ctx, types.NamespacedName{
Namespace: metav1.NamespaceSystem,
Name: "kubeadm-config",
}, &cm)

if err == nil {
logger.Sugar().Info("Trying to fetch the ClusterCIDR from kube-system/kubeadm-config")
k8sPodCIDR, k8sServiceCIDR, err = ExtractK8sCIDRFromKubeadmConfigMap(&cm)
if err == nil {
// Success to get ClusterCIDR from kubeadm-config
logger.Sugar().Infof("Success get CIDR from kubeadm-config: PodCIDR=%v, ServiceCIDR=%v", k8sPodCIDR, k8sServiceCIDR)
} else {
logger.Sugar().Warnf("Failed get CIDR from kubeadm-config: %v", err)
}
}

// if kubeadm-config ConfigMap not found, try to get ClusterCIDR from kube-controller-manager Pod
if len(k8sPodCIDR) == 0 || len(k8sServiceCIDR) == 0 {
logger.Sugar().Warnf("failed to get kube-system/kubeadm-config: %v, trying to fetch the ClusterCIDR from kube-controller-manager", err)
var cmPodList corev1.PodList
err = cc.APIReader.List(ctx, &cmPodList, client.MatchingLabels{"component": "kube-controller-manager"})
if err != nil {
var podList corev1.PodList
listOptions := client.MatchingLabels{"component": "kube-controller-manager"}

if err := cc.APIReader.List(ctx, &podList, listOptions); err != nil {
logger.Sugar().Errorf("failed to get kube-controller-manager Pod with label \"component: kube-controller-manager\": %v", err)
event.EventRecorder.Eventf(
coordCopy,
Expand All @@ -485,14 +500,14 @@ func (cc *CoordinatorController) updatePodAndServerCIDR(ctx context.Context, log
return coordCopy
}

if len(cmPodList.Items) == 0 {
if len(podList.Items) == 0 {
errMsg := "No kube-controller-manager pod found, unable to get clusterCIDR"
logger.Error(errMsg)
setStatus2NoReady(logger, errMsg, coordCopy)
return coordCopy
}

k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKCMPod(&cmPodList.Items[0])
k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKCMPod(&podList.Items[0])
logger.Sugar().Infof("kube-controller-manager k8sPodCIDR %v, k8sServiceCIDR %v", k8sPodCIDR, k8sServiceCIDR)
}

Expand Down Expand Up @@ -757,20 +772,23 @@ func (cc *CoordinatorController) updateServiceCIDR(logger *zap.Logger, coordCopy
return nil
}

func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []string) {
func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []string, error) {
var podCIDR, serviceCIDR []string

podReg := regexp.MustCompile(`podSubnet: (.*)`)
serviceReg := regexp.MustCompile(`serviceSubnet: (.*)`)

var podSubnets, serviceSubnets []string
for _, data := range cm.Data {
podSubnets = podReg.FindStringSubmatch(data)
serviceSubnets = serviceReg.FindStringSubmatch(data)
clusterConfig, exists := cm.Data["ClusterConfiguration"]
if !exists {
return podCIDR, serviceCIDR, fmt.Errorf("unable to get kubeadm configmap ClusterConfiguration")
}

if len(podSubnets) != 0 {
podReg := regexp.MustCompile(`podSubnet:\s*(\S+)`)
serviceReg := regexp.MustCompile(`serviceSubnet:\s*(\S+)`)

podSubnets := podReg.FindStringSubmatch(clusterConfig)
serviceSubnets := serviceReg.FindStringSubmatch(clusterConfig)

if len(podSubnets) > 1 {
for _, cidr := range strings.Split(podSubnets[1], ",") {
cidr = strings.TrimSpace(cidr)
_, _, err := net.ParseCIDR(cidr)
if err != nil {
continue
Expand All @@ -779,8 +797,9 @@ func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []strin
}
}

if len(serviceSubnets) != 0 {
if len(serviceSubnets) > 1 {
for _, cidr := range strings.Split(serviceSubnets[1], ",") {
cidr = strings.TrimSpace(cidr)
_, _, err := net.ParseCIDR(cidr)
if err != nil {
continue
Expand All @@ -789,7 +808,7 @@ func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []strin
}
}

return podCIDR, serviceCIDR
return podCIDR, serviceCIDR, nil
}

func ExtractK8sCIDRFromKCMPod(kcm *corev1.Pod) ([]string, []string) {
Expand Down
140 changes: 140 additions & 0 deletions pkg/coordinatormanager/coordinator_informer_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
package coordinatormanager

import (
"encoding/json"
corev1 "k8s.io/api/core/v1"
"reflect"
"testing"
)

// TestExtractK8sCIDRFromKubeadmConfigMap exercises CIDR extraction from a
// kubeadm-config ConfigMap against four fixtures:
//   - a ConfigMap whose entire manifest is serialized on a single line,
//   - a pretty-printed ConfigMap with podSubnet/serviceSubnet present,
//   - a ConfigMap missing the ClusterConfiguration key (must error),
//   - a ClusterConfiguration whose networking section declares no CIDRs
//     (must succeed with empty results).
func TestExtractK8sCIDRFromKubeadmConfigMap(t *testing.T) {
	// Fixture: the whole kubeadm ConfigMap on one line, as captured via
	// `kubectl get -o json` with managedFields included.
	clusterConfigurationInOneLineJson := `
{"metadata":{"name":"kubeadm-config","namespace":"kube-system","uid":"01abef5a-5f01-46c6-9a13-e9f5438e3a23","resourceVersion":"25563","creationTimestamp":"2024-12-06T10:45:23Z","annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"v1\",\"data\":{\"ClusterConfiguration\":\"apiServer:\\n certSANs:\\n - 127.0.0.1\\n - apiserver.cluster.local\\n - 10.103.97.2\\n - 192.168.165.128\\n extraArgs:\\n audit-log-format: json\\n audit-log-maxage: \\\"7\\\"\\n audit-log-maxbackup: \\\"10\\\"\\n audit-log-maxsize: \\\"100\\\"\\n audit-log-path: /var/log/kubernetes/audit.log\\n audit-policy-file: /etc/kubernetes/audit-policy.yml\\n authorization-mode: Node,RBAC\\n enable-aggregator-routing: \\\"true\\\"\\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\\n extraVolumes:\\n - hostPath: /etc/kubernetes\\n mountPath: /etc/kubernetes\\n name: audit\\n pathType: DirectoryOrCreate\\n - hostPath: /var/log/kubernetes\\n mountPath: /var/log/kubernetes\\n name: audit-log\\n pathType: DirectoryOrCreate\\n - hostPath: /etc/localtime\\n mountPath: /etc/localtime\\n name: localtime\\n pathType: File\\n readOnly: true\\n timeoutForControlPlane: 4m0s\\napiVersion: kubeadm.k8s.io/v1beta2\\ncertificatesDir: /etc/kubernetes/pki\\nclusterName: kubernetes\\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\\ncontrollerManager:\\n extraArgs:\\n bind-address: 0.0.0.0\\n cluster-signing-duration: 876000h\\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\\n extraVolumes:\\n - hostPath: /etc/localtime\\n mountPath: /etc/localtime\\n name: localtime\\n pathType: File\\n readOnly: true\\ndns:\\n type: CoreDNS\\netcd:\\n local:\\n dataDir: /var/lib/etcd\\n extraArgs:\\n listen-metrics-urls: http://0.0.0.0:2381\\nimageRepository: k8s.gcr.io\\nkind: ClusterConfiguration\\nkubernetesVersion: v1.21.14\\nnetworking:\\n dnsDomain: cluster.local\\n podSubnet: 192.168.165.0/24\\n serviceSubnet: 245.100.128.0/18\\nscheduler:\\n extraArgs:\\n bind-address: 0.0.0.0\\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\\n extraVolumes:\\n - hostPath: /etc/localtime\\n mountPath: /etc/localtime\\n name: localtime\\n pathType: File\\n readOnly: true\\n\",\"ClusterStatus\":\"apiEndpoints:\\n anolios79:\\n advertiseAddress: 192.168.165.128\\n bindPort: 6443\\napiVersion: kubeadm.k8s.io/v1beta2\\nkind: ClusterStatus\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"name\":\"kubeadm-config\",\"namespace\":\"kube-system\"}}\n"},"managedFields":[{"manager":"kubectl-client-side-apply","operation":"Update","apiVersion":"v1","time":"2024-12-06T10:45:23Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ClusterConfiguration":{},"f:ClusterStatus":{}},"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{}}}}}]},"data":{"ClusterConfiguration":"apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\n podSubnet: 192.168.165.0/24\n serviceSubnet: 245.100.128.0/18\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n","ClusterStatus":"apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"}}
`
	// Fixture: pretty-printed ConfigMap whose networking section carries
	// podSubnet and serviceSubnet.
	clusterConfigurationJson := `{
"apiVersion": "v1",
"data": {
"ClusterConfiguration": "apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\n podSubnet: 192.168.165.0/24\n serviceSubnet: 245.100.128.0/18\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n",
"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
},
"kind": "ConfigMap",
"metadata": {
"creationTimestamp": "2024-12-06T08:58:59Z",
"name": "kubeadm-config",
"namespace": "kube-system",
"resourceVersion": "12661",
"uid": "bd80980d-4e5a-4e8a-85a1-7dd69a3d033f"
}
}`
	// Fixture: only ClusterStatus is present — the ClusterConfiguration key
	// is missing entirely, which the extractor reports as an error.
	noClusterConfigurationJson := `{
"apiVersion": "v1",
"data": {
"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
},
"kind": "ConfigMap",
"metadata": {
"creationTimestamp": "2024-12-06T08:58:59Z",
"name": "kubeadm-config",
"namespace": "kube-system",
"resourceVersion": "12661",
"uid": "bd80980d-4e5a-4e8a-85a1-7dd69a3d033f"
}
}`
	// Fixture: ClusterConfiguration exists but its networking section has
	// neither podSubnet nor serviceSubnet — extraction yields empty results.
	noCIDRJson := `{
"apiVersion": "v1",
"data": {
"ClusterConfiguration": "apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n",
"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
},
"kind": "ConfigMap",
"metadata": {
"creationTimestamp": "2024-12-06T08:58:59Z",
"name": "kubeadm-config",
"namespace": "kube-system",
"resourceVersion": "12661",
"uid": "bd80980d-4e5a-4e8a-85a1-7dd69a3d033f"
}
}`

	// Decode every fixture up front; a malformed fixture aborts the test.
	cmOneLine := &corev1.ConfigMap{}
	if err := json.Unmarshal([]byte(clusterConfigurationInOneLineJson), cmOneLine); err != nil {
		t.Fatalf("Failed to unmarshal clusterConfigurationInOneLineJson: %v", err)
	}
	cmPretty := &corev1.ConfigMap{}
	if err := json.Unmarshal([]byte(clusterConfigurationJson), cmPretty); err != nil {
		t.Fatalf("Failed to unmarshal clusterConfigurationJson: %v", err)
	}
	cmNoClusterConfiguration := &corev1.ConfigMap{}
	if err := json.Unmarshal([]byte(noClusterConfigurationJson), cmNoClusterConfiguration); err != nil {
		t.Fatalf("Failed to unmarshal noClusterConfigurationJson: %v", err)
	}
	cmNoCIDR := &corev1.ConfigMap{}
	if err := json.Unmarshal([]byte(noCIDRJson), cmNoCIDR); err != nil {
		t.Fatalf("Failed to unmarshal noCIDRJson: %v", err)
	}

	// Table of cases: expected pod/service CIDR slices (nil when nothing
	// should be extracted) and whether an error is expected.
	cases := []struct {
		name            string
		cm              *corev1.ConfigMap
		wantPodCIDR     []string
		wantServiceCIDR []string
		wantErr         bool
	}{
		{
			name:            "ClusterConfiguration In One line",
			cm:              cmOneLine,
			wantPodCIDR:     []string{"192.168.165.0/24"},
			wantServiceCIDR: []string{"245.100.128.0/18"},
			wantErr:         false,
		},
		{
			name:            "ClusterConfiguration",
			cm:              cmPretty,
			wantPodCIDR:     []string{"192.168.165.0/24"},
			wantServiceCIDR: []string{"245.100.128.0/18"},
			wantErr:         false,
		},
		{
			name:            "No ClusterConfiguration",
			cm:              cmNoClusterConfiguration,
			wantPodCIDR:     nil,
			wantServiceCIDR: nil,
			wantErr:         true,
		},
		{
			name:            "No CIDR",
			cm:              cmNoCIDR,
			wantPodCIDR:     nil,
			wantServiceCIDR: nil,
			wantErr:         false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, got1, err := ExtractK8sCIDRFromKubeadmConfigMap(tc.cm)
			if (err != nil) != tc.wantErr {
				t.Errorf("ExtractK8sCIDRFromKubeadmConfigMap() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tc.wantPodCIDR) {
				t.Errorf("ExtractK8sCIDRFromKubeadmConfigMap() got = %v, podCIDR %v", got, tc.wantPodCIDR)
			}
			if !reflect.DeepEqual(got1, tc.wantServiceCIDR) {
				t.Errorf("ExtractK8sCIDRFromKubeadmConfigMap() got1 = %v, serviceCIDR %v", got1, tc.wantServiceCIDR)
			}
		})
	}
}

0 comments on commit fb36fbe

Please sign in to comment.