From 57fb732c7f17bb76e76f830d62999ebed019d48c Mon Sep 17 00:00:00 2001 From: Antonio Ojea Date: Sun, 21 Apr 2024 13:18:40 +0000 Subject: [PATCH] add admin network policy --- cmd/main.go | 31 +- go.mod | 1 + go.sum | 2 + pkg/networkpolicy/adminnetworkpolicy.go | 282 +++++++++ pkg/networkpolicy/adminnetworkpolicy_test.go | 614 +++++++++++++++++++ pkg/networkpolicy/controller.go | 46 +- pkg/networkpolicy/controller_test.go | 112 ++++ pkg/networkpolicy/networkpolicy_test.go | 82 --- 8 files changed, 1083 insertions(+), 87 deletions(-) create mode 100644 pkg/networkpolicy/adminnetworkpolicy.go create mode 100644 pkg/networkpolicy/adminnetworkpolicy_test.go create mode 100644 pkg/networkpolicy/controller_test.go diff --git a/cmd/main.go b/cmd/main.go index 55db485..0b90c3a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -13,9 +13,13 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "sigs.k8s.io/knftables" "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" + npaclient "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned" + npainformers "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions" + "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/apis/v1alpha1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" + v1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/klog/v2" @@ -25,12 +29,14 @@ import ( var ( failOpen bool + adminNetworkPolicy bool // AdminNetworkPolicy is alpha so keep it feature gated behind a flag queueID int metricsBindAddress string ) func init() { flag.BoolVar(&failOpen, "fail-open", false, "If set, don't drop packets if the controller is not running") + flag.BoolVar(&adminNetworkPolicy, "admin-network-policy", false, "If set, enable Admin Network Policy API") flag.IntVar(&queueID, "nfqueue-id", 100, "Number of the nfqueue used") flag.StringVar(&metricsBindAddress, "metrics-bind-address", ":9080", "The IP address 
and port for the metrics server to serve on") @@ -55,8 +61,9 @@ func main() { } cfg := networkpolicy.Config{ - FailOpen: failOpen, - QueueID: queueID, + AdminNetworkPolicy: adminNetworkPolicy, + FailOpen: failOpen, + QueueID: queueID, } // creates the in-cluster config config, err := rest.InClusterConfig() @@ -83,6 +90,20 @@ func main() { informersFactory := informers.NewSharedInformerFactory(clientset, 0) + var npaClient *npaclient.Clientset + var npaInformerFactory npainformers.SharedInformerFactory + var npaInformer v1alpha1.AdminNetworkPolicyInformer + var nodeInformer v1.NodeInformer + if adminNetworkPolicy { + nodeInformer = informersFactory.Core().V1().Nodes() + npaClient, err = npaclient.NewForConfig(config) + if err != nil { + klog.Fatalf("Failed to create Network client: %v", err) + } + npaInformerFactory = npainformers.NewSharedInformerFactory(npaClient, 0) + npaInformer = npaInformerFactory.Policy().V1alpha1().AdminNetworkPolicies() + } + http.Handle("/metrics", promhttp.Handler()) go func() { err := http.ListenAndServe(metricsBindAddress, nil) @@ -95,6 +116,9 @@ func main() { informersFactory.Networking().V1().NetworkPolicies(), informersFactory.Core().V1().Namespaces(), informersFactory.Core().V1().Pods(), + nodeInformer, + npaClient, + npaInformer, cfg, ) go func() { @@ -103,6 +127,9 @@ func main() { }() informersFactory.Start(ctx.Done()) + if adminNetworkPolicy { + npaInformerFactory.Start(ctx.Done()) + } select { case <-signalCh: diff --git a/go.mod b/go.mod index d704179..db0f1be 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/knftables v0.0.16 + sigs.k8s.io/network-policy-api v0.1.5 ) require ( diff --git a/go.sum b/go.sum index ad1b0d9..a9661b2 100644 --- a/go.sum +++ b/go.sum @@ -195,6 +195,8 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/knftables v0.0.16 h1:ZpTfNsjnidgoXdxxzcZLdSctqkpSO3QB3jo3zQ4PXqM= sigs.k8s.io/knftables v0.0.16/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= +sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E= +sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/pkg/networkpolicy/adminnetworkpolicy.go b/pkg/networkpolicy/adminnetworkpolicy.go new file mode 100644 index 0000000..e565fb2 --- /dev/null +++ b/pkg/networkpolicy/adminnetworkpolicy.go @@ -0,0 +1,282 @@ +package networkpolicy + +import ( + "cmp" + "fmt" + "net" + "slices" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog/v2" + npav1alpha1 "sigs.k8s.io/network-policy-api/apis/v1alpha1" +) + +// adminNetworkPolicyAction evaluate the Admin Network Policies on the packet +// that is evaluated both from the origin and the destination perspective. +// There can be 3 possible outcomes: +// 1. Policies Allow in both direction then the packet is Allowed and NOT evaluated by Network Policies +// 2. Policies Deny in any direction then the packet is Dropped and NOT evaluated by Network Policies +// 3. 
Policies Pass and or Allow then the packet is passed to be evaluated by Network Policies +func (c *Controller) adminNetworkPolicyAction(p packet) npav1alpha1.AdminNetworkPolicyRuleAction { + srcIP := p.srcIP + srcPod := c.getPodAssignedToIP(srcIP.String()) + srcPort := p.srcPort + dstIP := p.dstIP + dstPod := c.getPodAssignedToIP(dstIP.String()) + dstPort := p.dstPort + protocol := p.proto + srcPodAdminNetworkPolices := c.getAdminNetworkPoliciesForPod(srcPod) + dstPodAdminNetworkPolices := c.getAdminNetworkPoliciesForPod(dstPod) + + msg := fmt.Sprintf("checking packet %s", p.String()) + if srcPod != nil { + msg += fmt.Sprintf("\nSrcPod (%s/%s): %d AdminNetworkPolicy", srcPod.Name, srcPod.Namespace, len(srcPodAdminNetworkPolices)) + } + if dstPod != nil { + msg += fmt.Sprintf("\nDstPod (%s/%s): %d AdminNetworkPolicy", dstPod.Name, dstPod.Namespace, len(dstPodAdminNetworkPolices)) + } + klog.V(2).Infof("%s", msg) + + egressAction := c.evaluateAdminEgress(srcPodAdminNetworkPolices, dstPod, dstIP, dstPort, protocol) + ingressAction := c.evaluateAdminIngress(dstPodAdminNetworkPolices, srcPod, srcPort, protocol) + + // Traffic is denied in at least one direction + if ingressAction == npav1alpha1.AdminNetworkPolicyRuleActionDeny || + egressAction == npav1alpha1.AdminNetworkPolicyRuleActionDeny { + return npav1alpha1.AdminNetworkPolicyRuleActionDeny + } + + // Traffic is allowed in both direction + if ingressAction == npav1alpha1.AdminNetworkPolicyRuleActionAllow && + egressAction == npav1alpha1.AdminNetworkPolicyRuleActionAllow { + return npav1alpha1.AdminNetworkPolicyRuleActionAllow + } + + // Traffic has to pass to be passed to Network Policies + return npav1alpha1.AdminNetworkPolicyRuleActionPass +} + +func (c *Controller) evaluateAdminEgress(adminNetworkPolices []*npav1alpha1.AdminNetworkPolicy, pod *v1.Pod, ip net.IP, port int, protocol v1.Protocol) npav1alpha1.AdminNetworkPolicyRuleAction { + for _, policy := range adminNetworkPolices { + for _, rule := range 
policy.Spec.Egress { + // Ports allows for matching traffic based on port and protocols. + // This field is a list of destination ports for the outgoing egress traffic. + // If Ports is not set then the rule does not filter traffic via port. + if rule.Ports != nil { + if !evaluateAdminNetworkPolicyPort(*rule.Ports, pod, port, protocol) { + continue + } + } + // To is the List of destinations whose traffic this rule applies to. + // If any AdminNetworkPolicyEgressPeer matches the destination of outgoing + // traffic then the specified action is applied. + // This field must be defined and contain at least one item. + for _, to := range rule.To { + // Exactly one of the selector pointers must be set for a given peer. If a + // consumer observes none of its fields are set, they must assume an unknown + // option has been specified and fail closed. + if to.Namespaces != nil { + if c.namespaceSelector(to.Namespaces, pod) { + return rule.Action + } + } + + if to.Pods != nil { + if c.namespaceSelector(&to.Pods.NamespaceSelector, pod) && + podSelector(&to.Pods.PodSelector, pod) { + return rule.Action + } + } + + if to.Nodes != nil { + if c.nodeSelector(to.Nodes, pod) { + return rule.Action + } + } + + for _, network := range to.Networks { + _, cidr, err := net.ParseCIDR(string(network)) + if err != nil { // this has been validated by the API + continue + } + if cidr.Contains(ip) { + return rule.Action + } + } + } + } + } + + return npav1alpha1.AdminNetworkPolicyRuleActionPass +} + +func (c *Controller) evaluateAdminIngress(adminNetworkPolices []*npav1alpha1.AdminNetworkPolicy, pod *v1.Pod, port int, protocol v1.Protocol) npav1alpha1.AdminNetworkPolicyRuleAction { + for _, policy := range adminNetworkPolices { + // Ingress is the list of Ingress rules to be applied to the selected pods. A total of 100 rules will be allowed in each ANP instance. 
The relative precedence of ingress rules within a single ANP object (all of which share the priority) will be determined by the order in which the rule is written. Thus, a rule that appears at the top of the ingress rules would take the highest precedence. + // ANPs with no ingress rules do not affect ingress traffic. + for _, rule := range policy.Spec.Ingress { + // Ports allows for matching traffic based on port and protocols. + // This field is a list of destination ports for the outgoing egress traffic. + // If Ports is not set then the rule does not filter traffic via port. + if rule.Ports != nil { + if !evaluateAdminNetworkPolicyPort(*rule.Ports, pod, port, protocol) { + continue + } + } + // To is the List of destinations whose traffic this rule applies to. + // If any AdminNetworkPolicyEgressPeer matches the destination of outgoing + // traffic then the specified action is applied. + // This field must be defined and contain at least one item. + for _, from := range rule.From { + // Exactly one of the selector pointers must be set for a given peer. If a + // consumer observes none of its fields are set, they must assume an unknown + // option has been specified and fail closed. 
+ if from.Namespaces != nil { + if c.namespaceSelector(from.Namespaces, pod) { + return rule.Action + } + } + + if from.Pods != nil { + if c.namespaceSelector(&from.Pods.NamespaceSelector, pod) && + podSelector(&from.Pods.PodSelector, pod) { + return rule.Action + } + } + } + + } + } + + return npav1alpha1.AdminNetworkPolicyRuleActionPass +} + +// namespaceSelector returns true if the namespace selector matches the pod +func (c *Controller) namespaceSelector(selector *metav1.LabelSelector, pod *v1.Pod) bool { + nsSelector, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return false + } + + namespaces, err := c.namespaceLister.List(nsSelector) + if err != nil { + return false + } + + for _, ns := range namespaces { + if pod.Namespace == ns.Name { + return true + } + } + return false +} + +// podSelector returns true if the pod selector matches the pod +func podSelector(selector *metav1.LabelSelector, pod *v1.Pod) bool { + podSelector, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return false + } + return podSelector.Matches(labels.Set(pod.Labels)) +} + +// nodeSelector returns true if the node selector matches the node the pod is running on +func (c *Controller) nodeSelector(selector *metav1.LabelSelector, pod *v1.Pod) bool { + nodeSelector, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return false + } + nodes, err := c.nodeLister.List(nodeSelector) + if err != nil { + return false + } + + for _, node := range nodes { + if pod.Spec.NodeName == node.Name { + return true + } + } + return false +} + +// getAdminNetworkPoliciesForPod returns the list of Admin Network Policies matching the Pod +// The list is ordered by priority, from higher to lower.
+func (c *Controller) getAdminNetworkPoliciesForPod(pod *v1.Pod) []*npav1alpha1.AdminNetworkPolicy { + if pod == nil { + return nil + } + // Get all the network policies that affect this pod + networkPolices, err := c.adminNetworkPolicyLister.List(labels.Everything()) + if err != nil { + klog.Infof("getAdminNetworkPoliciesForPod error: %v", err) + return nil + } + + result := []*npav1alpha1.AdminNetworkPolicy{} + for _, policy := range networkPolices { + if policy.Spec.Subject.Namespaces != nil && + c.namespaceSelector(policy.Spec.Subject.Namespaces, pod) { + klog.V(2).Infof("Pod %s/%s match AdminNetworkPolicy %s", pod.Name, pod.Namespace, policy.Name) + result = append(result, policy) + } + + if policy.Spec.Subject.Pods != nil && + c.namespaceSelector(&policy.Spec.Subject.Pods.NamespaceSelector, pod) && + podSelector(&policy.Spec.Subject.Pods.PodSelector, pod) { + klog.V(2).Infof("Pod %s/%s match AdminNetworkPolicy %s", pod.Name, pod.Namespace, policy.Name) + result = append(result, policy) + } + } + // Rules with lower priority values have higher precedence + slices.SortFunc(result, func(a, b *npav1alpha1.AdminNetworkPolicy) int { + if n := cmp.Compare(a.Spec.Priority, b.Spec.Priority); n != 0 { + return n + } + // If priorities are equal, order by name + return cmp.Compare(a.Name, b.Name) + }) + return result +} + +func evaluateAdminNetworkPolicyPort(networkPolicyPorts []npav1alpha1.AdminNetworkPolicyPort, pod *v1.Pod, port int, protocol v1.Protocol) bool { + // AdminNetworkPolicyPort describes how to select network ports on pod(s). + // Exactly one field must be set. 
+ if len(networkPolicyPorts) == 0 { + return true + } + + for _, policyPort := range networkPolicyPorts { + // Port number + if policyPort.PortNumber != nil && + policyPort.PortNumber.Port == int32(port) && + policyPort.PortNumber.Protocol == protocol { + return true + } + + // Named Port + if policyPort.NamedPort != nil { + if pod == nil { + continue + } + for _, container := range pod.Spec.Containers { + for _, p := range container.Ports { + if p.Name == *policyPort.NamedPort { + return true + } + } + } + } + + // Port range + if policyPort.PortRange != nil && + policyPort.PortRange.Protocol == protocol && + policyPort.PortRange.Start <= int32(port) && + policyPort.PortRange.End >= int32(port) { + return true + } + + } + return false +} diff --git a/pkg/networkpolicy/adminnetworkpolicy_test.go b/pkg/networkpolicy/adminnetworkpolicy_test.go new file mode 100644 index 0000000..b75d3d8 --- /dev/null +++ b/pkg/networkpolicy/adminnetworkpolicy_test.go @@ -0,0 +1,614 @@ +package networkpolicy + +import ( + "net" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/component-base/logs" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + npav1alpha1 "sigs.k8s.io/network-policy-api/apis/v1alpha1" +) + +type adminNetpolTweak func(networkPolicy *npav1alpha1.AdminNetworkPolicy) + +func makeAdminNetworkPolicyCustom(name, ns string, tweaks ...adminNetpolTweak) *npav1alpha1.AdminNetworkPolicy { + networkAdminPolicy := &npav1alpha1.AdminNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: npav1alpha1.AdminNetworkPolicySpec{}, + } + for _, fn := range tweaks { + fn(networkAdminPolicy) + } + return networkAdminPolicy +} + +func Test_adminNetworkPolicyAction(t *testing.T) { + _, err := logs.GlogSetter("4") + if err != nil { + t.Fatal(err) + } + state := klog.CaptureState() + t.Cleanup(state.Restore) + + podA := makePod("a", "foo", "192.168.1.11") + podB := makePod("b", "bar", "192.168.2.22") + // podC will not match 
neither selectors or namespaces + podC := makePod("c", "blocked", "192.168.3.33") + podC.Labels = map[string]string{"c": "d"} + // podD is same namespace PodB with different selectors + podD := makePod("d", "bar", "192.168.4.44") + podD.Labels = map[string]string{"c": "d"} + + npaDefaultDenyIngress := makeAdminNetworkPolicyCustom("default-deny-ingress", "bar", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Ingress = []npav1alpha1.AdminNetworkPolicyIngressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []npav1alpha1.AdminNetworkPolicyIngressPeer{{Namespaces: &metav1.LabelSelector{}}}, + }} + }) + + npaDefaultDenyEgress := makeAdminNetworkPolicyCustom("default-deny-egress", "bar", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{Namespaces: &metav1.LabelSelector{}}}, + }} + }) + + npaAllowAllIngress := makeAdminNetworkPolicyCustom("default-allow-ingress", "bar", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Ingress = []npav1alpha1.AdminNetworkPolicyIngressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + From: []npav1alpha1.AdminNetworkPolicyIngressPeer{{Namespaces: &metav1.LabelSelector{}}}, + }} + }) + + npaAllowAllIngressPod := makeAdminNetworkPolicyCustom("default-allow-ingress-pods", "bar", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + 
networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 110 + networkPolicy.Spec.Ingress = []npav1alpha1.AdminNetworkPolicyIngressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []npav1alpha1.AdminNetworkPolicyIngressPeer{{ + Pods: &npav1alpha1.NamespacedPod{ + PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + }, + }}, + }} + }) + + npaMultiPortEgress := makeAdminNetworkPolicyCustom("multiport-egress", "foo", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{Namespaces: &metav1.LabelSelector{}}}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + npaMultiPortEgressCIDR := makeAdminNetworkPolicyCustom("multiport-egress", "foo", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{ + Networks: []npav1alpha1.CIDR{"192.168.0.0/16"}, + }}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + npaMultiPortEgressPodSelector := makeAdminNetworkPolicyCustom("multiport-egress", "foo", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = 
npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{ + Pods: &npav1alpha1.NamespacedPod{ + PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + }, + }}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + npaMultiPortEgressNsSelector := makeAdminNetworkPolicyCustom("multiport-egress-ns", "foo", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{ + Pods: &npav1alpha1.NamespacedPod{ + NamespaceSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + }, + }}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + npaMultiPortEgressPodNsSelector := makeAdminNetworkPolicyCustom("multiport-egress-pod-ns", "foo", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Egress = []npav1alpha1.AdminNetworkPolicyEgressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []npav1alpha1.AdminNetworkPolicyEgressPeer{{ + Pods: &npav1alpha1.NamespacedPod{ + NamespaceSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + PodSelector: metav1.LabelSelector{MatchLabels: 
map[string]string{"a": "b"}}, + }, + }}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + npaMultiPortIngressPodNsSelector := makeAdminNetworkPolicyCustom("multiport-ingress-pod-ns", "bar", + func(networkPolicy *npav1alpha1.AdminNetworkPolicy) { + networkPolicy.Spec.Subject = npav1alpha1.AdminNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}} + networkPolicy.Spec.Priority = 100 + networkPolicy.Spec.Ingress = []npav1alpha1.AdminNetworkPolicyIngressRule{{ + Action: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []npav1alpha1.AdminNetworkPolicyIngressPeer{{ + Pods: &npav1alpha1.NamespacedPod{ + NamespaceSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, + }, + }}, + Ports: &[]npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }}}, + }} + }) + + tests := []struct { + name string + networkpolicy []*npav1alpha1.AdminNetworkPolicy + namespace []*v1.Namespace + pod []*v1.Pod + node []*v1.Node + p packet + expect npav1alpha1.AdminNetworkPolicyRuleAction + }{ + { + name: "no network policy", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionPass, + }, + { + name: "deny ingress", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyIngress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: 
net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + }, + { + name: "deny egress", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.2.22"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.1.11"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + }, + { + name: "deny egress on reply does not have effect", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionPass, + }, + { + name: "allow all override deny ingress", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyIngress, npaAllowAllIngress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + }, + { + name: "deny has higher priority", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyIngress, npaAllowAllIngressPod}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionDeny, + }, + { + name: "allow ingress", + 
networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaAllowAllIngressPod}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("10.0.0.1"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + + { + name: "multiport deny egress port", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress port", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 30080, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressCIDR}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress port selector not match ns", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, 
npaMultiPortEgressPodSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress port selector not match pod selector", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressPodSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.3.33"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress ns selector", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress ns selector fail", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.3.33"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress ns and pod selector", + networkpolicy: 
[]*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressPodNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow egress ns and pod selector fail", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyEgress, npaMultiPortEgress, npaMultiPortEgressPodNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.3.33"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow ingress ns and pod selector", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyIngress, npaMultiPortIngressPodNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.2.22"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + { + name: "multiport allow ingress ns and pod selector fail", + networkpolicy: []*npav1alpha1.AdminNetworkPolicy{npaDefaultDenyIngress, npaMultiPortIngressPodNsSelector}, + namespace: []*v1.Namespace{makeNamespace("foo"), makeNamespace("bar")}, + pod: []*v1.Pod{podA, podB, podC, podD}, + p: packet{ + srcIP: net.ParseIP("192.168.1.11"), + srcPort: 52345, + dstIP: net.ParseIP("192.168.4.44"), + dstPort: 80, + proto: v1.ProtocolTCP, + }, + expect: npav1alpha1.AdminNetworkPolicyRuleActionAllow, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + controller := newController() + // Add objects to the Store + for _, n := range tt.networkpolicy { + err := controller.networkpolicyStore.Add(n) + if err != nil { + t.Fatal(err) + } + } + for _, n := range tt.namespace { + err := controller.namespaceStore.Add(n) + if err != nil { + t.Fatal(err) + } + } + for _, p := range tt.pod { + err := controller.podStore.Add(p) + if err != nil { + t.Fatal(err) + } + } + + ok := controller.adminNetworkPolicyAction(tt.p) + if ok != tt.expect { + t.Errorf("expected %v got %v", tt.expect, ok) + } + + }) + } +} + +func Test_evaluateAdminNetworkPolicyPort(t *testing.T) { + tests := []struct { + name string + networkPolicyPorts []npav1alpha1.AdminNetworkPolicyPort + pod *v1.Pod + port int + protocol v1.Protocol + want bool + }{ + { + name: "empty", + pod: makePod("test", "nstest", "192.168.1.1"), + want: true, + }, + { + name: "match port", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortNumber: &npav1alpha1.Port{ + Protocol: v1.ProtocolTCP, + Port: 80, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 80, + protocol: v1.ProtocolTCP, + want: true, + }, + { + name: "wrong port protocol", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortNumber: &npav1alpha1.Port{ + Protocol: v1.ProtocolTCP, + Port: 80, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 80, + protocol: v1.ProtocolUDP, + want: false, + }, + { + name: "wrong port number", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortNumber: &npav1alpha1.Port{ + Protocol: v1.ProtocolTCP, + Port: 80, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 443, + protocol: v1.ProtocolTCP, + want: false, + }, + { + name: "match port named", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + NamedPort: ptr.To[string]("http"), + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 80, + protocol: v1.ProtocolTCP, + want: true, + }, +
{ + name: "match port range", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 80, + protocol: v1.ProtocolTCP, + want: true, + }, + { + name: "out port range", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 180, + protocol: v1.ProtocolTCP, + want: false, + }, + { + name: "inside port range but wrong protocol", + networkPolicyPorts: []npav1alpha1.AdminNetworkPolicyPort{{ + PortRange: &npav1alpha1.PortRange{ + Protocol: v1.ProtocolTCP, + Start: 80, + End: 120, + }, + }}, + pod: makePod("test", "nstest", "192.168.1.1"), + port: 90, + protocol: v1.ProtocolUDP, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := evaluateAdminNetworkPolicyPort(tt.networkPolicyPorts, tt.pod, tt.port, tt.protocol); got != tt.want { + t.Errorf("evaluateAdminNetworkPolicyPort() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/networkpolicy/controller.go b/pkg/networkpolicy/controller.go index 46fee1a..ea973dd 100644 --- a/pkg/networkpolicy/controller.go +++ b/pkg/networkpolicy/controller.go @@ -22,7 +22,12 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "sigs.k8s.io/knftables" + npav1alpha1 "sigs.k8s.io/network-policy-api/apis/v1alpha1" + npaclient "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned" + policyinformers "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/apis/v1alpha1" + policylisters "sigs.k8s.io/network-policy-api/pkg/client/listers/apis/v1alpha1" ) // Network policies are hard to implement efficiently, and in large clusters this is @@ -47,8 +52,9 @@ const ( ) type Config struct { - FailOpen bool // 
allow traffic if the controller is not available - QueueID int + FailOpen bool // allow traffic if the controller is not available + AdminNetworkPolicy bool + QueueID int } // NewController returns a new *Controller. @@ -57,6 +63,9 @@ func NewController(client clientset.Interface, networkpolicyInformer networkinginformers.NetworkPolicyInformer, namespaceInformer coreinformers.NamespaceInformer, podInformer coreinformers.PodInformer, + nodeInformer coreinformers.NodeInformer, + npaClient npaclient.Interface, + adminNetworkPolicyInformer policyinformers.AdminNetworkPolicyInformer, config Config, ) *Controller { klog.V(2).Info("Creating event broadcaster") @@ -136,6 +145,13 @@ func NewController(client clientset.Interface, c.namespacesSynced = namespaceInformer.Informer().HasSynced c.networkpolicyLister = networkpolicyInformer.Lister() c.networkpoliciesSynced = networkpolicyInformer.Informer().HasSynced + if config.AdminNetworkPolicy { + c.npaClient = npaClient + c.nodeLister = nodeInformer.Lister() + c.nodesSynced = nodeInformer.Informer().HasSynced + c.adminNetworkPolicyLister = adminNetworkPolicyInformer.Lister() + c.adminNetworkPolicySynced = adminNetworkPolicyInformer.Informer().HasSynced + } c.eventBroadcaster = broadcaster c.eventRecorder = recorder @@ -159,6 +175,12 @@ type Controller struct { podLister corelisters.PodLister podsSynced cache.InformerSynced + npaClient npaclient.Interface + + adminNetworkPolicyLister policylisters.AdminNetworkPolicyLister + adminNetworkPolicySynced cache.InformerSynced + nodeLister corelisters.NodeLister + nodesSynced cache.InformerSynced // function to get the Pod given an IP // if an error or not found it returns nil getPodAssignedToIP func(podIP string) *v1.Pod @@ -178,7 +200,11 @@ func (c *Controller) Run(ctx context.Context) error { // Wait for the caches to be synced klog.Info("Waiting for informer caches to sync") - if !cache.WaitForNamedCacheSync(controllerName, ctx.Done(), c.networkpoliciesSynced, c.namespacesSynced, 
c.podsSynced) { + caches := []cache.InformerSynced{c.networkpoliciesSynced, c.namespacesSynced, c.podsSynced} + if c.config.AdminNetworkPolicy { + caches = append(caches, c.adminNetworkPolicySynced, c.nodesSynced) + } + if !cache.WaitForNamedCacheSync(controllerName, ctx.Done(), caches...) { return fmt.Errorf("error syncing cache") } @@ -268,6 +294,20 @@ func (c *Controller) Run(ctx context.Context) error { klog.V(2).Infof("Finished syncing packet %d took: %v accepted: %v", *a.PacketID, time.Since(startTime), verdict == nfqueue.NfAccept) }() + // Admin Network Policies are evaluated first + // https://network-policy-api.sigs.k8s.io/api-overview/#the-adminnetworkpolicy-resource + if c.config.AdminNetworkPolicy { + switch c.adminNetworkPolicyAction(packet) { + case npav1alpha1.AdminNetworkPolicyRuleActionDeny: + c.nfq.SetVerdict(*a.PacketID, nfqueue.NfDrop) //nolint:errcheck + return 0 + case npav1alpha1.AdminNetworkPolicyRuleActionAllow: + c.nfq.SetVerdict(*a.PacketID, nfqueue.NfAccept) //nolint:errcheck + return 0 + case npav1alpha1.AdminNetworkPolicyRuleActionPass: + } + } + // Network Policy if c.acceptNetworkPolicy(packet) { verdict = nfqueue.NfAccept diff --git a/pkg/networkpolicy/controller_test.go b/pkg/networkpolicy/controller_test.go new file mode 100644 index 0000000..648e63f --- /dev/null +++ b/pkg/networkpolicy/controller_test.go @@ -0,0 +1,112 @@ +package networkpolicy + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/knftables" + npaclientfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" + npainformers "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions" +) + +func makeNamespace(name string) *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "kubernetes.io/metadata.name": name, + "a": "b", + }, + 
}, + } +} + +func makeNode(name string) *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "kubernetes.io/node": name, + "a": "b", + }, + }, + } +} + +func makePod(name, ns string, ip string) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: map[string]string{ + "a": "b", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "write-pod", + Command: []string{"/bin/sh"}, + Ports: []v1.ContainerPort{{ + Name: "http", + ContainerPort: 80, + Protocol: v1.ProtocolTCP, + }}, + }, + }, + }, + Status: v1.PodStatus{ + PodIPs: []v1.PodIP{ + {IP: ip}, + }, + }, + } + + return pod + +} + +var ( + alwaysReady = func() bool { return true } + protocolTCP = v1.ProtocolTCP + protocolUDP = v1.ProtocolUDP +) + +type networkpolicyController struct { + *Controller + networkpolicyStore cache.Store + namespaceStore cache.Store + podStore cache.Store +} + +func newController() *networkpolicyController { + client := fake.NewSimpleClientset() + informersFactory := informers.NewSharedInformerFactory(client, 0) + + npaClient := npaclientfake.NewSimpleClientset() + npaInformerFactory := npainformers.NewSharedInformerFactory(npaClient, 0) + + controller := NewController(client, + &knftables.Fake{}, + informersFactory.Networking().V1().NetworkPolicies(), + informersFactory.Core().V1().Namespaces(), + informersFactory.Core().V1().Pods(), + informersFactory.Core().V1().Nodes(), + npaClient, + npaInformerFactory.Policy().V1alpha1().AdminNetworkPolicies(), + Config{ + AdminNetworkPolicy: true, + }, + ) + controller.networkpoliciesSynced = alwaysReady + controller.namespacesSynced = alwaysReady + controller.podsSynced = alwaysReady + return &networkpolicyController{ + controller, + informersFactory.Networking().V1().NetworkPolicies().Informer().GetStore(), + informersFactory.Core().V1().Namespaces().Informer().GetStore(), + 
informersFactory.Core().V1().Pods().Informer().GetStore(), + } +} diff --git a/pkg/networkpolicy/networkpolicy_test.go b/pkg/networkpolicy/networkpolicy_test.go index 1106859..615eec4 100644 --- a/pkg/networkpolicy/networkpolicy_test.go +++ b/pkg/networkpolicy/networkpolicy_test.go @@ -8,13 +8,9 @@ import ( networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" "k8s.io/component-base/logs" "k8s.io/klog/v2" "k8s.io/utils/ptr" - "sigs.k8s.io/knftables" ) type netpolTweak func(networkPolicy *networkingv1.NetworkPolicy) @@ -44,84 +40,6 @@ func makePort(proto *v1.Protocol, port intstr.IntOrString, endPort int32) networ return r } -func makeNamespace(name string) *v1.Namespace { - return &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "kubernetes.io/metadata.name": name, - "a": "b", - }, - }, - } -} -func makePod(name, ns string, ip string) *v1.Pod { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - Labels: map[string]string{ - "a": "b", - }, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "write-pod", - Command: []string{"/bin/sh"}, - Ports: []v1.ContainerPort{{ - Name: "http", - ContainerPort: 80, - Protocol: v1.ProtocolTCP, - }}, - }, - }, - }, - Status: v1.PodStatus{ - PodIPs: []v1.PodIP{ - {IP: ip}, - }, - }, - } - - return pod - -} - -var ( - alwaysReady = func() bool { return true } - protocolTCP = v1.ProtocolTCP - protocolUDP = v1.ProtocolUDP -) - -type networkpolicyController struct { - *Controller - networkpolicyStore cache.Store - namespaceStore cache.Store - podStore cache.Store -} - -func newController() *networkpolicyController { - client := fake.NewSimpleClientset() - informersFactory := informers.NewSharedInformerFactory(client, 0) - controller := NewController(client, - &knftables.Fake{}, - 
informersFactory.Networking().V1().NetworkPolicies(), - informersFactory.Core().V1().Namespaces(), - informersFactory.Core().V1().Pods(), - Config{}, - ) - controller.networkpoliciesSynced = alwaysReady - controller.namespacesSynced = alwaysReady - controller.podsSynced = alwaysReady - return &networkpolicyController{ - controller, - informersFactory.Networking().V1().NetworkPolicies().Informer().GetStore(), - informersFactory.Core().V1().Namespaces().Informer().GetStore(), - informersFactory.Core().V1().Pods().Informer().GetStore(), - } -} - func TestSyncPacket(t *testing.T) { _, err := logs.GlogSetter("4") if err != nil {