diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go
index 92cfae5c5be..ea3a0e5fec5 100644
--- a/cmd/antrea-agent/agent.go
+++ b/cmd/antrea-agent/agent.go
@@ -111,7 +111,7 @@ func run(o *Options) error {
 	}
 
 	k8s.OverrideKubeAPIServer(o.config.KubeAPIServerOverride)
-	informerFactory := informers.NewSharedInformerFactoryWithOptions(k8sClient, informerDefaultResync, informers.WithTransform(k8s.NewTrimmer()))
+	informerFactory := informers.NewSharedInformerFactoryWithOptions(k8sClient, informerDefaultResync, informers.WithTransform(k8s.NewTrimmer(k8s.TrimNode)))
 	crdInformerFactory := crdinformers.NewSharedInformerFactoryWithOptions(crdClient, informerDefaultResync, crdinformers.WithTransform(k8s.NewTrimmer()))
 	traceflowInformer := crdInformerFactory.Crd().V1beta1().Traceflows()
 	egressInformer := crdInformerFactory.Crd().V1beta1().Egresses()
diff --git a/pkg/util/k8s/transform.go b/pkg/util/k8s/transform.go
index 22c4311313e..f8ea3c991ad 100644
--- a/pkg/util/k8s/transform.go
+++ b/pkg/util/k8s/transform.go
@@ -89,3 +89,14 @@ func TrimPod(obj interface{}) (interface{}, error) {
 	pod.Status.ResourceClaimStatuses = nil
 	return pod, nil
 }
+
+// TrimNode clears fields from a Node that are not required by Antrea.
+// This is safe because Antrea only patches Nodes, so the dropped fields are never written back.
+func TrimNode(obj interface{}) (interface{}, error) {
+	node, ok := obj.(*corev1.Node)
+	if !ok {
+		return obj, nil
+	}
+	node.Status.Images = nil
+	return node, nil
+}
diff --git a/pkg/util/k8s/transform_test.go b/pkg/util/k8s/transform_test.go
index 0aabfc455af..a4c2af00b2d 100644
--- a/pkg/util/k8s/transform_test.go
+++ b/pkg/util/k8s/transform_test.go
@@ -101,6 +101,74 @@ func TestTrimK8sObject(t *testing.T) {
 				},
 			},
 		},
+		{
+			name:    "node",
+			trimmer: NewTrimmer(TrimNode),
+			obj: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-node",
+					ManagedFields: []metav1.ManagedFieldsEntry{
+						{
+							APIVersion: "v1",
+							FieldsType: "FieldsV1",
+						},
+					},
+				},
+				Spec: corev1.NodeSpec{
+					PodCIDR: "10.0.0.0/24",
+					PodCIDRs: []string{
+						"10.0.0.0/24",
+					},
+				},
+				Status: corev1.NodeStatus{
+					Conditions: []corev1.NodeCondition{
+						{
+							Type:    corev1.NodeReady,
+							Status:  corev1.ConditionTrue,
+							Reason:  "KubeletReady",
+							Message: "kubelet is posting ready status. AppArmor enabled",
+						},
+					},
+					Images: []corev1.ContainerImage{
+						{
+							Names: []string{
+								"registry.k8s.io/kube-proxy@sha256:a9f441a6b440c634ccfe62530ab1c7ff2ea7ed3f577f91f6a71c7e2f51256410",
+								"registry.k8s.io/kube-proxy:v1.26.15",
+							},
+							SizeBytes: 72051242,
+						},
+						{
+							Names: []string{
+								"registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
+								"registry.k8s.io/pause:3.6",
+							},
+							SizeBytes: 682696,
+						},
+					},
+				},
+			},
+			want: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-node",
+				},
+				Spec: corev1.NodeSpec{
+					PodCIDR: "10.0.0.0/24",
+					PodCIDRs: []string{
+						"10.0.0.0/24",
+					},
+				},
+				Status: corev1.NodeStatus{
+					Conditions: []corev1.NodeCondition{
+						{
+							Type:    corev1.NodeReady,
+							Status:  corev1.ConditionTrue,
+							Reason:  "KubeletReady",
+							Message: "kubelet is posting ready status. AppArmor enabled",
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
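For context, here is a minimal, self-contained sketch of the informer-transform pattern this change relies on, assuming only the public client-go API. newTrimmer and trimNode below are hypothetical stand-ins for Antrea's k8s.NewTrimmer and k8s.TrimNode, not the real implementations.

// Sketch of the client-go informer-transform pattern; package name is arbitrary.
package trimsketch

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// trimNode mirrors the TrimNode added in this diff: it drops Node fields that
// no consumer of the cache reads, shrinking every cached Node object.
func trimNode(obj interface{}) (interface{}, error) {
	node, ok := obj.(*corev1.Node)
	if !ok {
		return obj, nil
	}
	node.Status.Images = nil
	return node, nil
}

// newTrimmer chains per-type trim functions into a single cache.TransformFunc.
// (Judging by the test above, the real NewTrimmer also strips common metadata
// such as ManagedFields; that part is omitted here.)
func newTrimmer(fns ...cache.TransformFunc) cache.TransformFunc {
	return func(obj interface{}) (interface{}, error) {
		var err error
		for _, fn := range fns {
			if obj, err = fn(obj); err != nil {
				return nil, err
			}
		}
		return obj, nil
	}
}

func newInformerFactory(client kubernetes.Interface) informers.SharedInformerFactory {
	// WithTransform runs the trimmer on every object before it is stored in
	// the shared cache, which is where the memory saving comes from.
	return informers.NewSharedInformerFactoryWithOptions(
		client, 12*time.Hour, informers.WithTransform(newTrimmer(trimNode)),
	)
}

Because the transform runs before objects enter the shared cache, every consumer of the informer sees the trimmed object. That is why only fields that no consumer reads, and that are never written back to the API server in full, can be dropped safely.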