diff --git a/build/charts/antrea/crds/clusternetworkpolicy.yaml b/build/charts/antrea/crds/clusternetworkpolicy.yaml index 44236e82f32..a6e2cca2530 100644 --- a/build/charts/antrea/crds/clusternetworkpolicy.yaml +++ b/build/charts/antrea/crds/clusternetworkpolicy.yaml @@ -1076,11 +1076,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1352,11 +1358,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 0995e0d35af..f4bc9bb2162 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -1642,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1918,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-crds.yml b/build/yamls/antrea-crds.yml index 584c14ad030..e0497dcf8b2 100644 --- a/build/yamls/antrea-crds.yml +++ b/build/yamls/antrea-crds.yml @@ -1635,11 +1635,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1911,11 +1917,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index 5d15e3393cc..bec701d3056 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -1642,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1918,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 5aadc9f617f..a4ae810f7b0 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -1642,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1918,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-ipsec.yml 
b/build/yamls/antrea-ipsec.yml index 928683a4a9a..393cb59da0a 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -1642,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1918,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index e921a844e6e..2451670ca39 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -1642,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1918,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/multicluster/build/yamls/antrea-multicluster-leader-global.yml b/multicluster/build/yamls/antrea-multicluster-leader-global.yml index ca5fcd07e5b..7cb1e1ab4a1 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader-global.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader-global.yml @@ -1143,9 +1143,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1550,9 +1558,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2107,9 +2123,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2514,9 +2538,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. 
+ description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4054,9 +4086,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4461,9 +4501,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5018,9 +5066,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5425,9 +5481,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/build/yamls/antrea-multicluster-leader.yml b/multicluster/build/yamls/antrea-multicluster-leader.yml index 8c8a4c10ad8..38f2c43342c 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader.yml @@ -1143,9 +1143,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. 
+ items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1550,9 +1558,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2107,9 +2123,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2514,9 +2538,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4054,9 +4086,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4461,9 +4501,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5018,9 +5066,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. 
+ items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5425,9 +5481,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml index 9140f80d34f..4bd6104aace 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml @@ -733,9 +733,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1140,9 +1148,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1697,9 +1713,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2104,9 +2128,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. 
+ items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml index fafe9bec89f..4c5fe68f0a6 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml @@ -731,9 +731,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1138,9 +1146,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1695,9 +1711,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2102,9 +2126,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index e35d6b8e5a0..9bb2f5ad79b 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -35,7 +35,7 @@ const ( var ( allPodsPerCluster []antreae2e.Pod perNamespacePods []string - perClusterNamespaces map[string]string + perClusterNamespaces map[string]antreae2e.TestNamespaceMeta podsByNamespace map[string][]antreae2e.Pod clusterK8sUtilsMap map[string]*antreae2e.KubernetesUtils ) @@ -53,10 +53,10 @@ func failOnError(err error, t *testing.T) { // initializeForPolicyTest creates three Pods in three test Namespaces for each test cluster. 
func initializeForPolicyTest(t *testing.T, data *MCTestData) { perNamespacePods = []string{"a", "b", "c"} - perClusterNamespaces = make(map[string]string) - perClusterNamespaces["x"] = "x" - perClusterNamespaces["y"] = "y" - perClusterNamespaces["z"] = "z" + perClusterNamespaces = make(map[string]antreae2e.TestNamespaceMeta) + for _, ns := range []string{"x", "y", "z"} { + perClusterNamespaces[ns] = antreae2e.TestNamespaceMeta{Name: ns} + } allPodsPerCluster = []antreae2e.Pod{} podsByNamespace = make(map[string][]antreae2e.Pod) @@ -64,8 +64,8 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) { for _, podName := range perNamespacePods { for _, ns := range perClusterNamespaces { - allPodsPerCluster = append(allPodsPerCluster, antreae2e.NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], antreae2e.NewPod(ns, podName)) + allPodsPerCluster = append(allPodsPerCluster, antreae2e.NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], antreae2e.NewPod(ns.Name, podName)) } } for clusterName := range data.clusterTestDataMap { diff --git a/pkg/apis/crd/v1beta1/types.go b/pkg/apis/crd/v1beta1/types.go index 46c1280d883..cdee4779428 100644 --- a/pkg/apis/crd/v1beta1/types.go +++ b/pkg/apis/crd/v1beta1/types.go @@ -626,8 +626,14 @@ type AppliedTo struct { NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` } +// PeerNamespaces describes criteria for selecting Pod/ExternalEntity +// from matched Namespaces. Only one of the criteria can be set. type PeerNamespaces struct { + // Selects from the same Namespace of the appliedTo workloads. Match NamespaceMatchType `json:"match,omitempty"` + // Selects Namespaces that share the same values for the given set of label keys + // with the appliedTo Namespace. Namespaces must have all the label keys. + SameLabels []string `json:"sameLabels,omitempty"` } // NamespaceMatchType describes Namespace matching strategy. diff --git a/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go b/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go index 7cde65971e8..219658c1ed8 100644 --- a/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go @@ -1153,7 +1153,7 @@ func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces *out = new(PeerNamespaces) - **out = **in + (*in).DeepCopyInto(*out) } if in.ExternalEntitySelector != nil { in, out := &in.ExternalEntitySelector, &out.ExternalEntitySelector @@ -1400,6 +1400,11 @@ func (in *Packet) DeepCopy() *Packet { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PeerNamespaces) DeepCopyInto(out *PeerNamespaces) { *out = *in + if in.SameLabels != nil { + in, out := &in.SameLabels, &out.SameLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/apiserver/openapi/zz_generated.openapi.go b/pkg/apiserver/openapi/zz_generated.openapi.go index 22779c68457..9ba7fb82404 100644 --- a/pkg/apiserver/openapi/zz_generated.openapi.go +++ b/pkg/apiserver/openapi/zz_generated.openapi.go @@ -5221,12 +5221,29 @@ func schema_pkg_apis_crd_v1beta1_PeerNamespaces(ref common.ReferenceCallback) co return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "PeerNamespaces describes criteria for selecting Pod/ExternalEntity from matched Namespaces. 
Only one of the criteria can be set.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ "match": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "Selects from the same Namespace of the appliedTo workloads.", + Type: []string{"string"}, + Format: "", + }, + }, + "sameLabels": { + SchemaProps: spec.SchemaProps{ + Description: "Selects Namespaces that share the same values for the given set of label keys with the appliedTo Namespace. Namespaces must have all the label keys.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, }, diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go index f711232bfa0..6e83f2a2f8c 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go @@ -16,6 +16,8 @@ package networkpolicy import ( "reflect" + "sort" + "strings" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,6 +35,10 @@ import ( utilsets "antrea.io/antrea/pkg/util/sets" ) +const ( + labelValueSeparator = "," +) + func getACNPReference(cnp *crdv1beta1.ClusterNetworkPolicy) *controlplane.NetworkPolicyReference { return &controlplane.NetworkPolicyReference{ Type: controlplane.AntreaClusterNetworkPolicy, @@ -111,7 +117,7 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels peerNamespacesSelectorExists := func(peers []crdv1beta1.NetworkPolicyPeer) bool { for _, peer := range peers { - if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { + if peer.Namespaces != nil { return true } } @@ -119,16 +125,13 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels } affectedPolicies := sets.New[string]() - objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, HasPerNamespaceRule) + objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, indexValueTrue) for _, obj := range objs { cnp := obj.(*crdv1beta1.ClusterNetworkPolicy) if affected := func() bool { if len(cnp.Spec.AppliedTo) > 0 { // The policy has only spec level AppliedTo. - if namespaceLabelMatches(cnp.Spec.AppliedTo) { - return true - } - return false + return namespaceLabelMatches(cnp.Spec.AppliedTo) } // The policy has rule level AppliedTo. // It needs to check each rule's peers. If any peer of the rule has PeerNamespaces selector and its @@ -151,6 +154,36 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels return affectedPolicies } +// getACNPsWithRulesMatchingAnyLabelKey gets all ACNPs that have relevant rules based on Namespace label keys. +func (n *NetworkPolicyController) getACNPsWithRulesMatchingAnyLabelKey(labelKeys sets.Set[string]) sets.Set[string] { + matchedPolicyNames := sets.New[string]() + for k := range labelKeys { + objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(namespaceRuleLabelKeyIndex, k) + for _, obj := range objs { + cnp := obj.(*crdv1beta1.ClusterNetworkPolicy) + matchedPolicyNames.Insert(cnp.Name) + } + } + return matchedPolicyNames +} + +// getACNPsWithRulesMatchingAnyUpdatedLabels gets all ACNPs that have rules based on Namespace +// label keys, which have changes in value across Namespace update. 
+func (n *NetworkPolicyController) getACNPsWithRulesMatchingAnyUpdatedLabels(oldNSLabels, newNSLabels map[string]string) sets.Set[string] { + updatedLabelKeys := sets.New[string]() + for k, v := range oldNSLabels { + if v2, ok := newNSLabels[k]; !ok || v2 != v { + updatedLabelKeys.Insert(k) + } + } + for k, v2 := range newNSLabels { + if v, ok := oldNSLabels[k]; !ok || v != v2 { + updatedLabelKeys.Insert(k) + } + } + return n.getACNPsWithRulesMatchingAnyLabelKey(updatedLabelKeys) +} + // addNamespace receives Namespace ADD events and triggers all ClusterNetworkPolicies that have a // per-namespace rule applied to this Namespace to be re-processed. func (n *NetworkPolicyController) addNamespace(obj interface{}) { @@ -179,6 +212,10 @@ func (n *NetworkPolicyController) updateNamespace(oldObj, curObj interface{}) { affectedACNPsByOldLabels := n.filterPerNamespaceRuleACNPsByNSLabels(oldNamespace.Labels) affectedACNPsByCurLabels := n.filterPerNamespaceRuleACNPsByNSLabels(curNamespace.Labels) affectedACNPs := utilsets.SymmetricDifferenceString(affectedACNPsByOldLabels, affectedACNPsByCurLabels) + // Any ACNPs that has Namespace label rules that refers to the label key set that has + // changed during the Namespace update will need to be re-processed. + acnpsWithRulesMatchingNSLabelKeys := n.getACNPsWithRulesMatchingAnyUpdatedLabels(oldNamespace.Labels, curNamespace.Labels) + affectedACNPs = affectedACNPs.Union(acnpsWithRulesMatchingNSLabelKeys) for cnpName := range affectedACNPs { // Ignore the ClusterNetworkPolicy if it has been removed during the process. if cnp, err := n.acnpLister.Get(cnpName); err == nil { @@ -336,9 +373,10 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl addressGroups := map[string]*antreatypes.AddressGroup{} // If appliedTo is set at spec level and the ACNP has per-namespace rules, then each appliedTo needs // to be split into appliedToGroups for each of its affected Namespace. - var clusterAppliedToAffectedNS []string - // atgForNamespace is the appliedToGroups split by Namespaces. - var atgForNamespace []*antreatypes.AppliedToGroup + atgPerAffectedNS := map[string]*antreatypes.AppliedToGroup{} + // When appliedTo is set at spec level and the ACNP has rules that select peer Namespaces by sameLabels, + // this field tracks the labels of all Namespaces selected by the appliedTo. + labelsPerAffectedNS := map[string]labels.Set{} // clusterSetScopeSelectorKeys keeps track of all the ClusterSet-scoped selector keys of the policy. // During policy peer processing, any ClusterSet-scoped selector will be registered with the // labelIdentityInterface and added to this set. 
By the end of the function, this set will @@ -349,15 +387,14 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl if at.ServiceAccount != nil { atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) - clusterAppliedToAffectedNS = append(clusterAppliedToAffectedNS, at.ServiceAccount.Namespace) - atgForNamespace = append(atgForNamespace, atg) + atgPerAffectedNS[at.ServiceAccount.Namespace] = atg + labelsPerAffectedNS[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) } else { - affectedNS := n.getAffectedNamespacesForAppliedTo(at) - for _, ns := range affectedNS { + labelsPerAffectedNS = n.getAffectedNamespacesForAppliedTo(at) + for ns := range labelsPerAffectedNS { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) - clusterAppliedToAffectedNS = append(clusterAppliedToAffectedNS, ns) - atgForNamespace = append(atgForNamespace, atg) + atgPerAffectedNS[ns] = atg } } } @@ -366,7 +403,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl processRules := func(cnpRules []crdv1beta1.Rule, direction controlplane.Direction) { for idx, cnpRule := range cnpRules { services, namedPortExists := toAntreaServicesForCRD(cnpRule.Ports, cnpRule.Protocols) - clusterPeers, perNSPeers := splitPeersByScope(cnpRule, direction) + clusterPeers, perNSPeers, nsLabelPeers := splitPeersByScope(cnpRule, direction) addRule := func(peer *controlplane.NetworkPolicyPeer, ruleAddressGroups []*antreatypes.AddressGroup, dir controlplane.Direction, ruleAppliedTos []*antreatypes.AppliedToGroup) { rule := controlplane.NetworkPolicyRule{ Direction: dir, @@ -390,7 +427,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } // When a rule's NetworkPolicyPeer is empty, a cluster level rule should be created // with an Antrea peer matching all addresses. - if len(clusterPeers) > 0 || len(perNSPeers) == 0 { + if len(clusterPeers) > 0 || len(perNSPeers)+len(nsLabelPeers) == 0 { ruleAppliedTos := cnpRule.AppliedTo // For ACNPs that have per-namespace rules, cluster-level rules will be created with appliedTo // set as the spec appliedTo for each rule. 
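At this point in processClusterNetworkPolicy, each rule's peers are split three ways: clusterPeers, perNSPeers (match: Self), and the new nsLabelPeers (sameLabels). As a hedged illustration of the two Namespace-scoped peer forms that drive the new branches — the label keys below are invented for this note, not taken from the patch:

    ingress:
      # perNSPeers: expanded into one rule per appliedTo Namespace,
      # selecting peers from that same Namespace only.
      - action: Allow
        from:
          - namespaces:
              match: Self
      # nsLabelPeers: expanded into one rule per group of appliedTo
      # Namespaces sharing identical values for every listed key; a
      # Namespace missing either key is not selected.
      - action: Allow
        from:
          - namespaces:
              sameLabels: [tier, tenant]
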
@@ -412,11 +449,11 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl if len(perNSPeers) > 0 { if len(cnp.Spec.AppliedTo) > 0 { // Create a rule for each affected Namespace of appliedTo at spec level - for i := range clusterAppliedToAffectedNS { - klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", clusterAppliedToAffectedNS[i], idx, cnp.Name) - peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, clusterAppliedToAffectedNS[i]) + for ns, atg := range atgPerAffectedNS { + klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name) + peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, ns) clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) - addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atgForNamespace[i]}) + addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg}) } } else { // Create a rule for each affected Namespace of appliedTo at rule level @@ -429,7 +466,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg}) } else { affectedNS := n.getAffectedNamespacesForAppliedTo(at) - for _, ns := range affectedNS { + for ns := range affectedNS { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name) peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, ns) @@ -440,6 +477,43 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } } } + if len(nsLabelPeers) > 0 { + if len(cnp.Spec.AppliedTo) > 0 { + // All affected Namespaces and their labels are already stored in labelsPerAffectedNS + for _, peer := range nsLabelPeers { + nsGroupByLabelVal := groupNamespacesByLabelValue(labelsPerAffectedNS, peer.Namespaces.SameLabels) + for labelValues, groupedNamespaces := range nsGroupByLabelVal { + peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerAffectedNS, labelValues, groupedNamespaces) + clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) + addRule(peer, ags, direction, atgs) + } + } + } else { + atgPerRuleAffectedNS := map[string]*antreatypes.AppliedToGroup{} + labelsPerRuleAffectedNS := map[string]labels.Set{} + for _, at := range cnpRule.AppliedTo { + if at.ServiceAccount != nil { + atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) + atgPerRuleAffectedNS[at.ServiceAccount.Namespace] = atg + labelsPerRuleAffectedNS[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) + } else { + labelsPerRuleAffectedNS = n.getAffectedNamespacesForAppliedTo(at) + for ns := range labelsPerRuleAffectedNS { + atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) + atgPerRuleAffectedNS[ns] = atg + } + } + } + for _, peer := range nsLabelPeers { + nsGroupByLabelVal := groupNamespacesByLabelValue(labelsPerRuleAffectedNS, peer.Namespaces.SameLabels) + for labelValues, groupedNamespaces := range nsGroupByLabelVal { + peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerRuleAffectedNS, labelValues, groupedNamespaces) + clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) + addRule(peer, ags, direction, atgs) + } + } + } + } } } // Compute 
NetworkPolicyRules for Ingress Rules. @@ -484,14 +558,14 @@ func serviceAccountNameToPodSelector(saName string) *metav1.LabelSelector { func hasPerNamespaceRule(cnp *crdv1beta1.ClusterNetworkPolicy) bool { for _, ingress := range cnp.Spec.Ingress { for _, peer := range ingress.From { - if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { + if peer.Namespaces != nil { return true } } } for _, egress := range cnp.Spec.Egress { for _, peer := range egress.To { - if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { + if peer.Namespaces != nil { return true } } @@ -499,6 +573,126 @@ func hasPerNamespaceRule(cnp *crdv1beta1.ClusterNetworkPolicy) bool { return false } +func namespaceRuleLabelKeys(cnp *crdv1beta1.ClusterNetworkPolicy) sets.Set[string] { + keys := sets.New[string]() + for _, ingress := range cnp.Spec.Ingress { + for _, peer := range ingress.From { + if peer.Namespaces != nil { + for _, k := range peer.Namespaces.SameLabels { + keys.Insert(k) + } + } + } + } + for _, egress := range cnp.Spec.Egress { + for _, peer := range egress.To { + if peer.Namespaces != nil { + for _, k := range peer.Namespaces.SameLabels { + keys.Insert(k) + } + } + } + } + return keys +} + +func (n *NetworkPolicyController) getNamespaceLabels(ns string) labels.Set { + namespace, err := n.namespaceLister.Get(ns) + if err != nil { + // The Namespace referred to (by ServiceAccount etc.) does not exist yet. + // ACNP will be re-queued once that Namespace event is received. + return labels.Set{} + } + return namespace.Labels +} + +// groupNamespaceByLabelValue groups Namespaces if they have the same label value for all the +// label keys listed. If a Namespace is missing at least one of the label keys, it will not +// be grouped. Example: +// +// ns1: app=web, tier=test, tenant=t1 +// ns2: app=web, tier=test, tenant=t2 +// ns3: app=web, tier=production, tenant=t1 +// ns4: app=web, tier=production, tenant=t2 +// ns5: app=db, tenant=t1 +// labelKeys = [app, tier] +// Result after grouping: +// "web,test,": [ns1, ns2] +// "web,production,": [ns3, ns4] +func groupNamespacesByLabelValue(affectedNSAndLabels map[string]labels.Set, labelKeys []string) map[string][]string { + nsGroupedByLabelVal := map[string][]string{} + for ns, nsLabels := range affectedNSAndLabels { + if groupKey := getLabelValues(nsLabels, labelKeys); groupKey != "" { + nsGroupedByLabelVal[groupKey] = append(nsGroupedByLabelVal[groupKey], ns) + } + } + return nsGroupedByLabelVal +} + +func getLabelValues(labels map[string]string, labelKeys []string) string { + key := "" + for _, k := range labelKeys { + if v, ok := labels[k]; !ok { + return "" + } else { + key += v + labelValueSeparator + } + } + return key +} + +// convertSameLabelsToSelector creates a LabelSelector based on a list of label keys +// and their expected values. +func convertSameLabelsToSelector(labelKeys []string, labelValues string) *metav1.LabelSelector { + labelValuesSep := strings.Split(labelValues, labelValueSeparator) + labelMatchCriteria := map[string]string{} + for i := range labelKeys { + labelMatchCriteria[labelKeys[i]] = labelValuesSep[i] + } + return &metav1.LabelSelector{ + MatchLabels: labelMatchCriteria, + } +} + +// toAntreaPeerForSameLabelNamespaces computes the appliedToGroups and addressGroups for each +// group of Namespaces who have the same values for the sameLabels keys. 
+func (n *NetworkPolicyController) toAntreaPeerForSameLabelNamespaces(peer crdv1beta1.NetworkPolicyPeer, + np metav1.Object, atgPerAffectedNS map[string]*antreatypes.AppliedToGroup, + labelValues string, + namespacesByLabelValues []string) (*controlplane.NetworkPolicyPeer, []*antreatypes.AppliedToGroup, []*antreatypes.AddressGroup, sets.Set[string]) { + labelKeys := peer.Namespaces.SameLabels + var labelIdentities []uint32 + uniqueLabelIDs := sets.New[uint32]() + clusterSetScopeSelectorKeys := sets.New[string]() + // select Namespaces who, for specific label keys, have the same values as the appliedTo Namespaces. + nsSelForSameLabels := convertSameLabelsToSelector(labelKeys, labelValues) + addressGroups := []*antreatypes.AddressGroup{n.createAddressGroup("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil)} + if n.stretchNPEnabled && peer.Scope == crdv1beta1.ScopeClusterSet { + newClusterSetScopeSelector := antreatypes.NewGroupSelector("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil) + clusterSetScopeSelectorKeys.Insert(newClusterSetScopeSelector.NormalizedName) + // In addition to getting the matched Label Identity IDs, AddSelector also registers the selector + // with the labelIdentityInterface. + matchedLabelIDs := n.labelIdentityInterface.AddSelector(newClusterSetScopeSelector, internalNetworkPolicyKeyFunc(np)) + for _, id := range matchedLabelIDs { + uniqueLabelIDs.Insert(id) + } + } + for id := range uniqueLabelIDs { + labelIdentities = append(labelIdentities, id) + } + antreaPeer := &controlplane.NetworkPolicyPeer{ + AddressGroups: getAddressGroupNames(addressGroups), + LabelIdentities: labelIdentities, + } + var atgs []*antreatypes.AppliedToGroup + sort.Strings(namespacesByLabelValues) + for _, ns := range namespacesByLabelValues { + atgForNamespace, _ := atgPerAffectedNS[ns] + atgs = append(atgs, atgForNamespace) + } + return antreaPeer, atgs, addressGroups, clusterSetScopeSelectorKeys +} + // processClusterAppliedTo processes appliedTo groups in Antrea ClusterNetworkPolicy set // at cluster level (appliedTo groups which will not need to be split by Namespaces). func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1beta1.AppliedTo) []*antreatypes.AppliedToGroup { @@ -525,32 +719,36 @@ func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1beta1 // splitPeersByScope splits the ClusterNetworkPolicy peers in the rule by whether the peer // is cluster-scoped or per-namespace. 
-func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) { - var clusterPeers, perNSPeers []crdv1beta1.NetworkPolicyPeer +func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) { + var clusterPeers, perNSPeers, nsLabelPeers []crdv1beta1.NetworkPolicyPeer peers := rule.From if dir == controlplane.DirectionOut { peers = rule.To } for _, peer := range peers { - if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { - perNSPeers = append(perNSPeers, peer) + if peer.Namespaces != nil { + if peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { + perNSPeers = append(perNSPeers, peer) + } else if len(peer.Namespaces.SameLabels) > 0 { + nsLabelPeers = append(nsLabelPeers, peer) + } } else { clusterPeers = append(clusterPeers, peer) } } - return clusterPeers, perNSPeers + return clusterPeers, perNSPeers, nsLabelPeers } // getAffectedNamespacesForAppliedTo computes the Namespaces currently affected by the appliedTo -// Namespace selectors. -func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1beta1.AppliedTo) []string { - var affectedNS []string +// Namespace selectors, and returns these Namespaces along with their labels. +func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1beta1.AppliedTo) map[string]labels.Set { + affectedNSAndLabels := map[string]labels.Set{} nsLabelSelector := appliedTo.NamespaceSelector if appliedTo.Group != "" { cg, err := n.cgLister.Get(appliedTo.Group) if err != nil { - return affectedNS + return affectedNSAndLabels } if cg.Spec.NamespaceSelector != nil || cg.Spec.PodSelector != nil { nsLabelSelector = cg.Spec.NamespaceSelector @@ -563,9 +761,9 @@ func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo cr } namespaces, _ := n.namespaceLister.List(nsSel) for _, ns := range namespaces { - affectedNS = append(affectedNS, ns.Name) + affectedNSAndLabels[ns.Name] = ns.Labels } - return affectedNS + return affectedNSAndLabels } // processInternalGroupForRule examines the internal group (and its childGroups if applicable) diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go index 3562c861196..3241723ec1a 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "antrea.io/antrea/multicluster/controllers/multicluster/common" @@ -55,6 +56,12 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { Labels: map[string]string{"foo2": "bar2"}, }, } + nsC := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nsC", + Labels: map[string]string{"foo2": "bar2"}, + }, + } svcA := v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -796,6 +803,21 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { Priority: 0, Action: &allowAction, }, + { + Direction: controlplane.DirectionIn, + AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)}, + From: controlplane.NetworkPolicyPeer{ + AddressGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, 
nil).NormalizedName)}, + }, + Services: []controlplane.Service{ + { + Protocol: &protocolTCP, + Port: &int80, + }, + }, + Priority: 0, + Action: &allowAction, + }, { Direction: controlplane.DirectionIn, AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("", nil, &metav1.LabelSelector{}, nil, nil).NormalizedName)}, @@ -815,12 +837,13 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { AppliedToGroups: []string{ getNormalizedUID(antreatypes.NewGroupSelector("nsA", nil, nil, nil, nil).NormalizedName), getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), getNormalizedUID(antreatypes.NewGroupSelector("", nil, &metav1.LabelSelector{}, nil, nil).NormalizedName), }, AppliedToPerRule: true, }, - expectedAppliedToGroups: 3, - expectedAddressGroups: 3, + expectedAppliedToGroups: 4, + expectedAddressGroups: 4, }, { name: "with-per-namespace-rule-applied-to-per-rule", @@ -915,15 +938,103 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { Priority: 1, Action: &dropAction, }, + { + Direction: controlplane.DirectionIn, + AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)}, + From: controlplane.NetworkPolicyPeer{ + AddressGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)}, + }, + Services: []controlplane.Service{ + { + Protocol: &protocolTCP, + Port: &int81, + }, + }, + Priority: 1, + Action: &dropAction, + }, }, AppliedToGroups: []string{ getNormalizedUID(antreatypes.NewGroupSelector("nsA", &selectorA, nil, nil, nil).NormalizedName), getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), }, AppliedToPerRule: true, }, - expectedAppliedToGroups: 2, - expectedAddressGroups: 2, + expectedAppliedToGroups: 3, + expectedAddressGroups: 3, + }, + { + name: "with-same-labels-namespace-rule", + inputPolicy: &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "cnpS", UID: "uidS"}, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Priority: p10, + Ingress: []crdv1beta1.Rule{ + { + Ports: []crdv1beta1.NetworkPolicyPort{ + { + Port: &int80, + }, + }, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"foo2"}, + }, + }, + }, + Action: &allowAction, + }, + }, + }, + }, + expectedPolicy: &antreatypes.NetworkPolicy{ + UID: "uidS", + Name: "uidS", + SourceRef: &controlplane.NetworkPolicyReference{ + Type: controlplane.AntreaClusterNetworkPolicy, + Name: "cnpS", + UID: "uidS", + }, + Priority: &p10, + TierPriority: &DefaultTierPriority, + Rules: []controlplane.NetworkPolicyRule{ + { + Direction: controlplane.DirectionIn, + AppliedToGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), + }, + From: controlplane.NetworkPolicyPeer{ + AddressGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("", nil, &selectorB, nil, nil).NormalizedName), + }, + }, + Services: []controlplane.Service{ + { + Protocol: &protocolTCP, + Port: &int80, + }, + }, + Priority: 0, + Action: &allowAction, + }, + }, + 
AppliedToGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("nsA", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), + }, + AppliedToPerRule: true, + }, + expectedAppliedToGroups: 3, + expectedAddressGroups: 1, }, { name: "rule-with-to-service", @@ -1782,6 +1893,7 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { c.cgStore.Add(&cgA) c.namespaceStore.Add(&nsA) c.namespaceStore.Add(&nsB) + c.namespaceStore.Add(&nsC) c.serviceStore.Add(&svcA) c.tierStore.Add(&tierA) actualPolicy, actualAppliedToGroups, actualAddressGroups := c.processClusterNetworkPolicy(tt.inputPolicy) @@ -1799,9 +1911,9 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { } } -func TestAddCNP(t *testing.T) { +func TestAddACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() npc.addCNP(cnp) require.Equal(t, 1, npc.internalNetworkPolicyQueue.Len()) key, done := npc.internalNetworkPolicyQueue.Get() @@ -1810,9 +1922,9 @@ func TestAddCNP(t *testing.T) { assert.False(t, done) } -func TestUpdateCNP(t *testing.T) { +func TestUpdateACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() newCNP := cnp.DeepCopy() // Make a change to the CNP. newCNP.Annotations = map[string]string{"foo": "bar"} @@ -1824,9 +1936,9 @@ func TestUpdateCNP(t *testing.T) { assert.False(t, done) } -func TestDeleteCNP(t *testing.T) { +func TestDeleteACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() npc.deleteCNP(cnp) require.Equal(t, 1, npc.internalNetworkPolicyQueue.Len()) key, done := npc.internalNetworkPolicyQueue.Get() @@ -2060,7 +2172,7 @@ func TestProcessRefGroupOrClusterGroup(t *testing.T) { // util functions for testing. 
-func getCNP() *crdv1beta1.ClusterNetworkPolicy { +func getACNP() *crdv1beta1.ClusterNetworkPolicy { p10 := float64(10) allowAction := crdv1beta1.RuleActionAllow selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}} @@ -2214,3 +2326,98 @@ func TestFilterPerNamespaceRuleACNPsByNSLabels(t *testing.T) { }) } } + +func TestGetACNPsWithRulesMatchingLabelKeysAcrossNSUpdate(t *testing.T) { + acnp1 := &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "acnp-with-tier-label-rule"}, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"tier"}, + }, + }, + }, + }, + }, + }, + } + acnp2 := &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "acnp-with-tier-and-purpose-label-rule"}, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"tier", "purpose"}, + }, + }, + }, + }, + }, + }, + } + tests := []struct { + name string + oldNSLabels labels.Set + newNSLabels labels.Set + want sets.Set[string] + }{ + { + name: "Namespace updated to have tier label", + oldNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns1", + }, + newNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns1", + "tier": "production", + }, + want: sets.New[string](acnp1.Name, acnp2.Name), + }, + { + name: "Namespace updated to have purpose label", + oldNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns2", + }, + newNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns2", + "purpose": "test", + }, + want: sets.New[string](acnp2.Name), + }, + { + name: "Namespace updated for irrelevant label", + oldNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns3", + "tier": "production", + }, + newNSLabels: map[string]string{ + "kubernetes.io/metadata.name": "ns2", + "tier": "production", + "owned-by": "dev-team", + }, + want: sets.New[string](), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, c := newController(nil, []runtime.Object{acnp1, acnp2}) + c.acnpStore.Add(acnp1) + c.acnpStore.Add(acnp2) + assert.Equal(t, tt.want, c.getACNPsWithRulesMatchingAnyUpdatedLabels(tt.oldNSLabels, tt.newNSLabels)) + }) + } +} diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go index af1edbcf773..ca5458900e2 100644 --- a/pkg/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/controller/networkpolicy/networkpolicy_controller.go @@ -93,8 +93,9 @@ const ( addressGroupType grouping.GroupType = "addressGroup" internalGroupType grouping.GroupType = "internalGroup" - perNamespaceRuleIndex = "hasPerNamespaceRule" - HasPerNamespaceRule = "true" + perNamespaceRuleIndex = "hasPerNamespaceRule" + namespaceRuleLabelKeyIndex = "namespaceRuleLabelKeys" + indexValueTrue = "true" ) var ( @@ -333,12 +334,18 @@ var acnpIndexers = cache.Indexers{ if !ok { return []string{}, nil } - has := hasPerNamespaceRule(acnp) - if has { - return []string{HasPerNamespaceRule}, nil + if hasPerNamespaceRule(acnp) { + return []string{indexValueTrue}, nil } return []string{}, nil }, + 
namespaceRuleLabelKeyIndex: func(obj interface{}) ([]string, error) { + cnp, ok := obj.(*secv1beta1.ClusterNetworkPolicy) + if !ok { + return []string{}, nil + } + return namespaceRuleLabelKeys(cnp).UnsortedList(), nil + }, } var annpIndexers = cache.Indexers{ diff --git a/pkg/controller/networkpolicy/validate.go b/pkg/controller/networkpolicy/validate.go index 5cd0e01071c..5639866e452 100644 --- a/pkg/controller/networkpolicy/validate.go +++ b/pkg/controller/networkpolicy/validate.go @@ -656,6 +656,16 @@ func (v *antreaPolicyValidator) validatePeers(ingress, egress []crdv1beta1.Rule) if peer.NamespaceSelector != nil && peer.Namespaces != nil { return "namespaces and namespaceSelector cannot be set at the same time for a single NetworkPolicyPeer", false } + if peer.Namespaces != nil { + if numFieldsSetInStruct(*peer.Namespaces) > 1 { + return "only one matching criteria can be specified in a single peer namespaces field", false + } + for _, k := range peer.Namespaces.SameLabels { + if err := validation.IsQualifiedName(k); err != nil { + return fmt.Sprintf("Invalid label key in sameLabels rule: %s", k), false + } + } + } peerFieldsNum := numFieldsSetInStruct(peer) if peer.Group != "" && peerFieldsNum > 1 { return "group cannot be set with other peers in rules", false diff --git a/pkg/controller/networkpolicy/validate_test.go b/pkg/controller/networkpolicy/validate_test.go index 43b20fe13ca..271da1e06c1 100644 --- a/pkg/controller/networkpolicy/validate_test.go +++ b/pkg/controller/networkpolicy/validate_test.go @@ -703,6 +703,65 @@ func TestValidateAntreaClusterNetworkPolicy(t *testing.T) { operation: admv1.Create, expectedReason: "namespaces and namespaceSelector cannot be set at the same time for a single NetworkPolicyPeer", }, + { + name: "acnp-double-peer-namespace-field", + policy: &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-double-peer-namespace-field", + }, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + Action: &allowAction, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + Match: crdv1beta1.NamespaceMatchSelf, + SameLabels: []string{"test"}, + }, + }, + }, + }, + }, + }, + }, + operation: admv1.Create, + expectedReason: "only one matching criteria can be specified in a single peer namespaces field", + }, + { + name: "acnp-invalid-rule-samelabels-key", + policy: &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-invalid-rule-samelabels-key", + }, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + Action: &allowAction, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"&illegalKey"}, + }, + }, + }, + }, + }, + }, + }, + operation: admv1.Update, + expectedReason: "Invalid label key in sameLabels rule: &illegalKey", + }, { name: "acnp-toservice-set-with-to", policy: &crdv1beta1.ClusterNetworkPolicy{ diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go index 11d6cd14866..7e6e191cca8 100644 --- a/test/e2e/antreaipam_anp_test.go +++ b/test/e2e/antreaipam_anp_test.go @@ -28,32 +28,26 @@ import ( // initializeAntreaIPAM must be called after Namespace in antreaIPAMNamespaces created func initializeAntreaIPAM(t *testing.T, data *TestData) { - p80 = 80 - 
{ name: "acnp-toservice-set-with-to", policy: &crdv1beta1.ClusterNetworkPolicy{ diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go index 11d6cd14866..7e6e191cca8 100644 --- a/test/e2e/antreaipam_anp_test.go +++ b/test/e2e/antreaipam_anp_test.go @@ -28,32 +28,26 @@ import ( // initializeAntreaIPAM must be called after Namespace in antreaIPAMNamespaces created func initializeAntreaIPAM(t *testing.T, data *TestData) { - p80 = 80 - p81 = 81 - p8080 = 8080 - p8081 = 8081 - p8082 = 8082 - p8085 = 8085 - pods = []string{"a", "b", "c"} - namespaces = make(map[string]string) - regularNamespaces := make(map[string]string) + podsPerNamespace = []string{"a", "b", "c"} + namespaces = make(map[string]TestNamespaceMeta) + regularNamespaces := make(map[string]TestNamespaceMeta) suffix := randName("") - namespaces["x"] = "antrea-x-" + suffix + namespaces["x"] = TestNamespaceMeta{ + Name: "antrea-x-" + suffix, + } regularNamespaces["x"] = namespaces["x"] // This function "initializeAntreaIPAM" will be used more than once, and variable "allPods" is global. // It should be empty every time when "initializeAntreaIPAM" is performed, otherwise there will be unexpected // results. allPods = []Pod{} podsByNamespace = make(map[string][]Pod) - for _, ns := range antreaIPAMNamespaces { - namespaces[ns] = ns + namespaces[ns] = TestNamespaceMeta{Name: ns} } - - for _, podName := range pods { + for _, podName := range podsPerNamespace { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) } } @@ -61,9 +55,9 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true, nil, nil) + _, err = k8sUtils.Bootstrap(regularNamespaces, podsPerNamespace, true, nil, nil) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, false, nil, nil) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, false, nil, nil) failOnError(err, t) podIPs = ips } @@ -195,35 +189,29 @@ func testAntreaIPAMACNP(t *testing.T, protocol e2eutils.AntreaPolicyProtocol, ac SetAppliedToGroup([]e2eutils.ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "c"}}}) if isIngress { builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder2.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder3.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) } else { builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder2.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder3.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) } reachability := NewReachability(allPods, action) - for _, ns := range namespaces { - for _, pod := range []string{"/a", "/b", "/c"} { - reachability.Expect(Pod(ns+pod), Pod(ns+pod), Connected) - } - } + reachability.ExpectSelf(allPods,
Connected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), builder2.Get(), builder3.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), builder2.Get(), builder3.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index cef8e0b4639..ffcd5d0ca1e 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -45,15 +45,22 @@ import ( // common for all tests. var ( - allPods []Pod - podsByNamespace map[string][]Pod - k8sUtils *KubernetesUtils - allTestList []*TestCase - pods []string - namespaces map[string]string - podIPs map[string][]string - p80, p81, p8080, p8081, p8082, p8085, p6443 int32 - nodes map[string]string + p80 int32 = 80 + p81 int32 = 81 + p6443 int32 = 6443 + p8080 int32 = 8080 + p8081 int32 = 8081 + p8082 int32 = 8082 + p8085 int32 = 8085 + allPods []Pod + podsByNamespace map[string][]Pod + k8sUtils *KubernetesUtils + allTestList []*TestCase + podsPerNamespace []string + namespaces map[string]TestNamespaceMeta + podIPs map[string][]string + nodes map[string]string + selfNamespace *crdv1beta1.PeerNamespaces ) const ( @@ -70,27 +77,6 @@ const ( defaultTierName = "application" ) -// TestAntreaPolicyStats is the top-level test which contains all subtests for -// AntreaPolicyStats related test cases so they can share setup, teardown. -func TestAntreaPolicyStats(t *testing.T) { - skipIfHasWindowsNodes(t) - skipIfAntreaPolicyDisabled(t) - skipIfNetworkPolicyStatsDisabled(t) - - data, err := setupTest(t) - if err != nil { - t.Fatalf("Error when setting up test: %v", err) - } - defer teardownTest(t, data) - - t.Run("testANNPNetworkPolicyStatsWithDropAction", func(t *testing.T) { - testANNPNetworkPolicyStatsWithDropAction(t, data) - }) - t.Run("testAntreaClusterNetworkPolicyStats", func(t *testing.T) { - testAntreaClusterNetworkPolicyStats(t, data) - }) -} - func failOnError(err error, t *testing.T) { if err != nil { log.Errorf("%+v", err) @@ -108,29 +94,46 @@ type podToAddrTestStep struct { expectedConnectivity PodConnectivityMark } -func initialize(t *testing.T, data *TestData) { - p80 = 80 - p81 = 81 - p8080 = 8080 - p8081 = 8081 - p8082 = 8082 - p8085 = 8085 - pods = []string{"a", "b", "c"} - namespaces = make(map[string]string) - suffix := randName("") - namespaces["x"] = "x-" + suffix - namespaces["y"] = "y-" + suffix - namespaces["z"] = "z-" + suffix +// Util function to get the runtime name of a test Namespace. +func getNS(ns string) string { + return namespaces[ns].Name +} + +// Util function to get the runtime Pod struct of a test Pod. +func getPod(ns, po string) Pod { + return Pod(namespaces[ns].Name + "/" + po) +} + +// Util function to get the runtime Pod name of a test Pod. +func getPodName(ns, po string) string { + return namespaces[ns].Name + "/" + po +} + +func initialize(t *testing.T, data *TestData, customNamespaces map[string]TestNamespaceMeta) { + selfNamespace = &crdv1beta1.PeerNamespaces{ + Match: crdv1beta1.NamespaceMatchSelf, + } + namespaces = make(map[string]TestNamespaceMeta) + if customNamespaces != nil { + namespaces = customNamespaces + } else { + suffix := randName("") + for _, ns := range []string{"x", "y", "z"} { + namespaces[ns] = TestNamespaceMeta{ + Name: ns + "-" + suffix, + } + } + } // This function "initialize" will be used more than once, and variable "allPods" is global. 
// It should be empty every time when "initialize" is performed, otherwise there will be unexpected // results. allPods = []Pod{} podsByNamespace = make(map[string][]Pod) - - for _, podName := range pods { + podsPerNamespace = []string{"a", "b", "c"} + for _, podName := range podsPerNamespace { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) } } skipIfAntreaPolicyDisabled(t) @@ -139,7 +142,7 @@ func initialize(t *testing.T, data *TestData) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nil, nil) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, true, nil, nil) failOnError(err, t) podIPs = ips } @@ -148,13 +151,13 @@ func skipIfAntreaPolicyDisabled(tb testing.TB) { skipIfFeatureDisabled(tb, features.AntreaPolicy, true, true) } -func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string]string) error { +func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string]TestNamespaceMeta) error { if err := k8s.CleanNetworkPolicies(namespaces); err != nil { return err } for _, ns := range namespaces { builder := &NetworkPolicySpecBuilder{} - builder = builder.SetName(ns, "default-deny-namespace") + builder = builder.SetName(ns.Name, "default-deny-namespace") builder.SetTypeIngress() if _, err := k8s.CreateOrUpdateNetworkPolicy(builder.Get()); err != nil { return err @@ -170,7 +173,7 @@ func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string return nil } -func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces map[string]string) error { +func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces map[string]TestNamespaceMeta) error { if err := k8s.CleanNetworkPolicies(namespaces); err != nil { return err } @@ -191,7 +194,6 @@ func testMutateACNPNoTier(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0) acnp := builder.Get() - log.Debugf("creating ACNP %v", acnp.Name) acnp, err := k8sUtils.CreateOrUpdateACNP(acnp) if err != nil { failOnError(fmt.Errorf("ACNP create failed %v", err), t) @@ -205,11 +207,10 @@ func testMutateACNPNoTier(t *testing.T) { func testMutateANNPNoTier(t *testing.T) { invalidNpErr := fmt.Errorf("ANNP tier not mutated to default tier") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-no-tier"). + builder = builder.SetName(getNS("x"), "annp-no-tier"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0) annp := builder.Get() - log.Debugf("creating ANNP %v", annp.Name) annp, err := k8sUtils.CreateOrUpdateANNP(annp) if err != nil { failOnError(fmt.Errorf("ANNP create failed %v", err), t) @@ -228,7 +229,6 @@ func testCreateValidationInvalidACNP(t *testing.T) { SetPriority(1.0). SetTier("no-exist") acnp := builder.Get() - log.Debugf("creating ACNP %v", acnp.Name) if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil { // Above creation of ACNP must fail as it is an invalid spec. failOnError(invalidNpErr, t) @@ -242,14 +242,14 @@ func testUpdateValidationInvalidACNP(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). 
SetPriority(1.0) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, - nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err != nil { failOnError(fmt.Errorf("create ACNP acnp-applied-to-update failed: %v", err), t) } builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, - nil, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) acnp = builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil { // Above update of ACNP must fail as it is an invalid spec. @@ -261,7 +261,7 @@ func testUpdateValidationInvalidACNP(t *testing.T) { func testCreateValidationInvalidANNP(t *testing.T) { invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy with non-exist tier accepted") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-no-priority"). + builder = builder.SetName(getNS("x"), "annp-no-priority"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(1.0). SetTier("non-exist") @@ -276,7 +276,7 @@ func testCreateValidationInvalidANNP(t *testing.T) { func testUpdateValidationInvalidANNP(t *testing.T) { invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy appliedTo set in both spec and rules accepted") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-applied-to-update"). + builder = builder.SetName(getNS("x"), "annp-applied-to-update"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(1.0) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, @@ -343,7 +343,7 @@ func testCreateValidationInvalidCG(t *testing.T) { cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName("cg-mix-peer"). SetPodSelector(map[string]string{"pod": "a"}, nil). - SetServiceReference("svc", namespaces["x"]) + SetServiceReference("svc", getNS("x")) cg := cgBuilder.Get() if _, err := k8sUtils.CreateOrUpdateCG(cg); err == nil { // Above creation of ClusterGroup must fail as it is an invalid spec. @@ -360,7 +360,7 @@ func testUpdateValidationInvalidCG(t *testing.T) { if _, err := k8sUtils.CreateOrUpdateCG(cg); err != nil { failOnError(fmt.Errorf("create ClusterGroup %s failed: %v", cg.Name, err), t) } - cgBuilder.SetServiceReference("svc", namespaces["x"]) + cgBuilder.SetServiceReference("svc", getNS("x")) cg = cgBuilder.Get() if _, err := k8sUtils.CreateOrUpdateCG(cg); err == nil { // Above update of ClusterGroup must fail as it is an invalid spec. @@ -372,9 +372,9 @@ func testUpdateValidationInvalidCG(t *testing.T) { func testCreateValidationInvalidGroup(t *testing.T) { invalidErr := fmt.Errorf("Group using podSelecter and serviceReference together created") gBuilder := &GroupSpecBuilder{} - gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(namespaces["x"]). + gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(getNS("x")). SetPodSelector(map[string]string{"pod": "a"}, nil). 
- SetServiceReference("svc", namespaces["x"]) + SetServiceReference("svc", getNS("x")) g := gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err == nil { // Above creation of Group must fail as it is an invalid spec. @@ -385,13 +385,13 @@ func testCreateValidationInvalidGroup(t *testing.T) { func testUpdateValidationInvalidGroup(t *testing.T) { invalidErr := fmt.Errorf("Group using podSelecter and serviceReference together updated") gBuilder := &GroupSpecBuilder{} - gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(namespaces["x"]). + gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(getNS("x")). SetPodSelector(map[string]string{"pod": "a"}, nil) g := gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err != nil { failOnError(fmt.Errorf("create Group %s/%s failed: %v", g.Namespace, g.Name, err), t) } - gBuilder.SetServiceReference("svc", namespaces["x"]) + gBuilder.SetServiceReference("svc", getNS("x")) g = gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err == nil { // Above update of Group must fail as it is an invalid spec. @@ -407,24 +407,22 @@ func testACNPAllowXBtoA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-a"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("z", "a"), Connected) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -439,33 +437,33 @@ func testACNPAllowXBtoA(t *testing.T) { // the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to verify that // packets can be matched by source port. func testACNPSourcePort(t *testing.T) { - portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a") + portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(getNS("x"), "a") failOnError(err, t) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-source-port"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-source-port"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-source-port"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("x")+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("y")+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("z")+"/a"), Dropped) // After adding the dst port constraint of port 80, traffic on port 81 should not be affected. updatedReachability := NewReachability(allPods, Connected) @@ -478,6 +476,8 @@ func testACNPSourcePort(t *testing.T) { ProtocolTCP, 0, nil, + nil, + nil, }, { "Port 81", @@ -487,6 +487,8 @@ func testACNPSourcePort(t *testing.T) { ProtocolTCP, 0, nil, + nil, + nil, }, { "Port range 80-81", @@ -496,6 +498,8 @@ func testACNPSourcePort(t *testing.T) { ProtocolTCP, 0, nil, + nil, + nil, }, } testCase := []*TestCase{ @@ -511,23 +515,21 @@ func testACNPAllowXBtoYA(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-allow-xb-to-ya"). SetPriority(2.0). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("y")}}}) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Connected) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "NamedPort 81", - reachability, - []metav1.Object{builder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "NamedPort 81", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -543,36 +545,34 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-priority2"). SetPriority(2). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority1"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence. 
reachabilityBothACNP := NewReachability(allPods, Dropped) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "a"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "a"), getPod("x", "c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "b"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "b"), getPod("x", "c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "c"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "c"), getPod("x", "c"), Connected) reachabilityBothACNP.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -594,22 +594,20 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-allow-x-ingress-y-egress-z"). SetPriority(1.1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 81", - reachability, - []metav1.Object{builder.Get()}, - []int32{81}, - protocol, - 0, - nil, + Name: "Port 81", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{81}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -632,23 +630,21 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-deny-a-to-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -664,24 +660,22 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-ingress-to-x"). SetPriority(1.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "drop-all-ingress", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/a"), Dropped) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/b"), Dropped) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/c"), Dropped) + reachability.ExpectAllIngress(getPod("x", "a"), Dropped) + reachability.ExpectAllIngress(getPod("x", "b"), Dropped) + reachability.ExpectAllIngress(getPod("x", "c"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -696,39 +690,35 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-ingress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped) + reachability1.Expect(getPod("z", "a"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "a"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("z", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("z", "a"), Dropped) reachability2 := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability1, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80", - reachability2, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolUDP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolUDP, }, } testCase := []*TestCase{ @@ -742,30 +732,28 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { cgName := "cg-pods-ya" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil). SetPodSelector(map[string]string{"pod": "a"}, nil) port81Name := "serve-81" builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-ya-from-xb"). SetPriority(2.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "NamedPort 81", - reachability, + Name: "NamedPort 81", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -779,29 +767,27 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { cgName := "cg-pods-xb" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("x")}, nil). SetPodSelector(map[string]string{"pod": "b"}, nil) port81Name := "serve-81" builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-xb-to-ya"). SetPriority(2.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("y")}}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "NamedPort 81", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "NamedPort 81", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -818,24 +804,22 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-a-to-z"). 
SetPriority(1.0) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -848,29 +832,27 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { cgName := "cg-ns-z" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -890,38 +872,34 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped) - updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped) - updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/b"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "c"), getNS("z"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("y", "c"), getNS("z"), Dropped) + updatedReachability.Expect(getPod("z", "c"), getPod("z", "a"), Dropped) + updatedReachability.Expect(getPod("z", "c"), getPod("z", "b"), Dropped) testStep := []*TestStep{ { - "CG Pods A", - reachability, - 
[]metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "CG Pods A", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "CG Pods C - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "CG Pods C - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -933,46 +911,42 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { func testACNPClusterGroupUpdate(t *testing.T) { cgName := "cg-ns-z-then-y" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) // Update CG NS selector to group Pods from Namespace Y updatedCgBuilder := &ClusterGroupSpecBuilder{} - updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["y"], Dropped) - updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/b"), Dropped) - updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/c"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("z", "a"), getNS("y"), Dropped) + updatedReachability.Expect(getPod("y", "a"), getPod("y", "b"), Dropped) + updatedReachability.Expect(getPod("y", "a"), getPod("y", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { 
- "Port 80 - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -985,22 +959,22 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { cgName := "cg-pod-custom-pod-zj" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil). SetPodSelector(map[string]string{"pod": "j"}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["z"], "j"), + Pod: NewPod(getNS("z"), "j"), Labels: map[string]string{"pod": "j"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], "j"), + Pod: NewPod(getNS("x"), "j"), Labels: map[string]string{"pod": "j"}, }, ExpectConnectivity: Dropped, @@ -1009,13 +983,11 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80", + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1028,7 +1000,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { cgName := "cg-pod-custom-pod-zk" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil). SetPodSelector(map[string]string{"pod": "k"}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-xk-to-cg-with-zk-egress"). 
@@ -1036,19 +1008,19 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "k"}, - NSSelector: map[string]string{"ns": namespaces["x"]}, + NSSelector: map[string]string{"ns": getNS("x")}, }, }) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], "k"), + Pod: NewPod(getNS("x"), "k"), Labels: map[string]string{"pod": "k"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["z"], "k"), + Pod: NewPod(getNS("z"), "k"), Labels: map[string]string{"pod": "k"}, }, ExpectConnectivity: Dropped, @@ -1057,14 +1029,12 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, + Name: "Port 80", // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1074,10 +1044,10 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { } func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { - podXAIP, _ := podIPs[namespaces["x"]+"/a"] - podXBIP, _ := podIPs[namespaces["x"]+"/b"] - podXCIP, _ := podIPs[namespaces["x"]+"/c"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podXAIP, _ := podIPs[getPodName("x", "a")] + podXBIP, _ := podIPs[getPodName("x", "b")] + podXCIP, _ := podIPs[getPodName("x", "c")] + podZAIP, _ := podIPs[getPodName("z", "a")] // There are three situations of a Pod's IP(s): // 1. Only one IPv4 address. // 2. Only one IPv6 address. 
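The trailing comment of the previous hunk is the crux of testACNPClusterGroupRefRuleIPBlocks: the ipBlock ClusterGroups are generated from the Pod IPs fetched via getPodName above, so the elided setup must cover IPv4-only, IPv6-only, and (presumably the comment's third case) dual-stack Pods. A hedged sketch of that derivation, with a hypothetical helper name standing in for whatever the real test file uses:

// podIPToCIDR turns a Pod IP into the host-width CIDR an ipBlock needs:
// /32 for IPv4, /128 for IPv6. Assumes a valid IP string and the
// standard library "fmt" and "net" imports.
func podIPToCIDR(ip string) string {
	if net.ParseIP(ip).To4() != nil {
		return fmt.Sprintf("%s/32", ip) // IPv4 host route
	}
	return fmt.Sprintf("%s/128", ip) // IPv6 host route
}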
@@ -1112,28 +1082,26 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "a"}, - NSSelector: map[string]string{"ns": namespaces["y"]}, + NSSelector: map[string]string{"ns": getNS("y")}, }, }) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "c"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1146,26 +1114,24 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { func testANNPEgressRulePodsAToGrpWithPodsC(t *testing.T) { grpName := "grp-xc" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-xa-to-grp-xc-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-xa-to-grp-xc-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1178,28 +1144,26 @@ func testANNPEgressRulePodsAToGrpWithPodsC(t *testing.T) { func testANNPIngressRuleDenyGrpWithXCtoXA(t *testing.T) { grpName := "grp-pods-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) port81Name := "serve-81" builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xb-to-xa"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xb-to-xa"). SetPriority(2.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "NamedPort 81", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "NamedPort 81", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1211,40 +1175,36 @@ func testANNPIngressRuleDenyGrpWithXCtoXA(t *testing.T) { func testANNPGroupUpdate(t *testing.T) { grpName := "grp-pod-xc-then-pod-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil) // Update Group Pod selector from X/C to X/B updatedGrpBuilder := &GroupSpecBuilder{} - updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-xa-to-grp-with-xc-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-xa-to-grp-with-xc-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) + updatedReachability.Expect(getPod("x", "a"), getPod("x", "b"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80 - update", - updatedReachability, - []metav1.Object{updatedGrpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedGrpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1257,29 +1217,27 @@ func testANNPGroupUpdate(t *testing.T) { func testANNPAppliedToDenyXBtoGrpWithXA(t *testing.T) { grpName := "grp-pods-ya" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) port81Name := "serve-81" builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xa-from-xb"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xa-from-xb"). SetPriority(2.0). SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "NamedPort 81", - reachability, + Name: "NamedPort 81", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1292,25 +1250,23 @@ func testANNPAppliedToDenyXBtoGrpWithXA(t *testing.T) { func testANNPAppliedToRuleGrpWithPodsAToPodsC(t *testing.T) { grpName := "grp-pods-a" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-a-to-c"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-a-to-c"). 
SetPriority(1.0) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, nil, nil, nil, []ANNPAppliedToSpec{{Group: grpName}}, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1322,40 +1278,36 @@ func testANNPAppliedToRuleGrpWithPodsAToPodsC(t *testing.T) { func testANNPGroupUpdateAppliedTo(t *testing.T) { grpName := "grp-pods-xa-then-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) // Update GRP Pod selector to group Pods x/b updatedGrpBuilder := &GroupSpecBuilder{} - updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-xc-to-xa-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-xc-to-xa-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) + updatedReachability.Expect(getPod("x", "b"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "GRP Pods X/C", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "GRP Pods X/C", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "GRP Pods X/B - update", - updatedReachability, - []metav1.Object{updatedGrpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "GRP Pods X/B - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedGrpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1367,9 +1319,9 @@ func testANNPGroupUpdateAppliedTo(t *testing.T) { func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { grpName := "grp-pod-custom-pod-xj" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "j"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "j"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xj-to-xd-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xj-to-xd-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "d"}, nil, nil, @@ -1377,11 +1329,11 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], "j"), + Pod: NewPod(getNS("x"), "j"), Labels: map[string]string{"pod": "j"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], "d"), + Pod: NewPod(getNS("x"), "d"), Labels: map[string]string{"pod": "d"}, }, ExpectConnectivity: Dropped, @@ -1390,13 +1342,11 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80", + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1406,17 +1356,17 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { } func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) grp1Name, grp2Name := "grp-svc1", "grp-svc2" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2") + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) + builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grp2Name, "") @@ -1425,11 +1375,11 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], svc2PodName), + Pod: NewPod(getNS("x"), svc2PodName), Labels: map[string]string{"pod": svc2PodName, "app": "b"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "a"}, }, ExpectConnectivity: Dropped, @@ -1438,15 +1388,14 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { } reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) testStep := &TestStep{ - "Port 80 updated", - reachability, - []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, 
- 0, - cp, + Name: "Port 80 updated", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } testSteps := []*TestStep{testStep} @@ -1457,8 +1406,8 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { } func testANNPGroupServiceRefDelete(t *testing.T) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) k8sUtils.CreateOrUpdateService(svc1) failOnError(waitForResourceReady(t, timeout, svc1), t) k8sUtils.CreateOrUpdateService(svc2) @@ -1466,9 +1415,9 @@ func testANNPGroupServiceRefDelete(t *testing.T) { grp1Name, grp2Name := "grp-svc1", "grp-svc2" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2") + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2") grp1 := grpBuilder1.Get() k8sUtils.CreateOrUpdateGroup(grp1) failOnError(waitForResourceReady(t, timeout, grp1), t) @@ -1477,7 +1426,7 @@ func testANNPGroupServiceRefDelete(t *testing.T) { failOnError(waitForResourceReady(t, timeout, grp2), t) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) + builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grp2Name, "") annp := builder.Get() @@ -1485,7 +1434,7 @@ func testANNPGroupServiceRefDelete(t *testing.T) { failOnError(waitForResourceReady(t, timeout, annp), t) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ := reachability.Summary() if wrong != 0 { @@ -1508,49 +1457,45 @@ func testANNPGroupServiceRefDelete(t *testing.T) { } func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) grp1Name, grp2Name := "grp-svc1", "grp-svc2" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = 
grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2") + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) + builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grp2Name, "") // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace x. reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update selector of Service referred in grp-svc1, and update serviceReference of grp-svc2. - svc1Updated := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) - svc3 := k8sUtils.BuildService("svc3", namespaces["x"], 80, 80, map[string]string{"app": "c"}, nil) - grpBuilder2Updated := grpBuilder2.SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc3") + svc1Updated := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) + svc3 := k8sUtils.BuildService("svc3", getNS("x"), 80, 80, map[string]string{"app": "c"}, nil) + grpBuilder2Updated := grpBuilder2.SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc3") // Pods backing svc1 (label pod=b) in Namespace x should not allow ingress from Pods backing svc3 (label pod=c) in Namespace x. reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) + reachability2.Expect(getPod("x", "c"), getPod("x", "b"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{svc1Updated, svc3, grpBuilder1.Get(), grpBuilder2Updated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{svc1Updated, svc3, grpBuilder1.Get(), grpBuilder2Updated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2} @@ -1561,8 +1506,8 @@ func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) { } func testANNPGroupRefRuleIPBlocks(t *testing.T) { - podXBIP, _ := podIPs[namespaces["x"]+"/b"] - podXCIP, _ := podIPs[namespaces["x"]+"/c"] + podXBIP, _ := podIPs[getPodName("x", "b")] + podXCIP, _ := podIPs[getPodName("x", "c")] // There are three situations of a Pod's IP(s): // 1. Only one IPv4 address. // 2. Only one IPv6 address.
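The refactor running through these hunks consistently swaps direct lookups such as namespaces["x"] and Pod(namespaces["x"]+"/a") for accessor helpers. Their definitions are not part of this diff; the sketch below is inferred from the call sites only. The registry shape and the TestNamespaceMeta fields are assumptions (only its Name field is visible in this diff, in the CleanNetworkPolicies call further down).

// Sketch only; the real helpers live elsewhere in the e2e framework.
type TestNamespaceMeta struct {
	Name string // assumed: at minimum, the concrete Namespace name
}

// The old code converts strings via Pod(ns+"/a"), so a string-based
// type is assumed here.
type Pod string

// namespaces maps a short alias ("x", "y", "z") to Namespace metadata.
var namespaces map[string]TestNamespaceMeta

// getNS resolves an alias to the concrete Namespace name.
func getNS(alias string) string {
	return namespaces[alias].Name
}

// getPodName rebuilds the "<namespace>/<pod>" key formerly written as
// namespaces["x"]+"/b", e.g. for indexing podIPs.
func getPodName(alias, pod string) string {
	return getNS(alias) + "/" + pod
}

// getPod wraps that key in the framework's Pod type, replacing
// Pod(namespaces["x"]+"/a").
func getPod(alias, pod string) Pod {
	return Pod(getPodName(alias, pod))
}

Centralizing the lookups this way lets the suite attach more metadata to a test Namespace, or randomize its name, without touching every call site again.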
@@ -1582,27 +1527,25 @@ func testANNPGroupRefRuleIPBlocks(t *testing.T) { grpName := "grp-ipblocks-pod-xb-xc" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetIPBlocks(ipBlock) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetIPBlocks(ipBlock) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-xb-xc-ips-ingress-for-xa"). + builder = builder.SetName(getNS("x"), "annp-deny-xb-xc-ips-ingress-for-xa"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) + reachability.Expect(getPod("x", "c"), getPod("x", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1612,21 +1555,21 @@ func testANNPGroupRefRuleIPBlocks(t *testing.T) { } func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) svc1PodName := randName("test-pod-svc1-") grp1Name, grp2Name, grp3Name := "grp-svc-x-a", "grp-select-x-b", "grp-select-x-c" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) grpBuilder3 := &GroupSpecBuilder{} - grpBuilder3 = grpBuilder3.SetName(grp3Name).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil) + grpBuilder3 = grpBuilder3.SetName(grp3Name).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil) grpNestedName := "grp-nested" grpBuilderNested := &GroupSpecBuilder{} - grpBuilderNested = grpBuilderNested.SetName(grpNestedName).SetNamespace(namespaces["x"]).SetChildGroups([]string{grp1Name, grp3Name}) + grpBuilderNested = grpBuilderNested.SetName(grpNestedName).SetNamespace(getNS("x")).SetChildGroups([]string{grp1Name, grp3Name}) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-nested-grp").SetPriority(1.0). + builder = builder.SetName(getNS("x"), "annp-nested-grp").SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{}}). 
AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpNestedName, "") @@ -1635,36 +1578,34 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { // Note that in this testStep grp3 will not be created yet, so even though grp-nested selects grp1 and // grp3 as childGroups, only members of grp1 will be included at this time. reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), svc1, grpBuilder1.Get(), grpBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), svc1, grpBuilder1.Get(), grpBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update "grp-nested" to include "grp-select-x-b" as well. grpBuilderNested = grpBuilderNested.SetChildGroups([]string{grp1Name, grp2Name, grp3Name}) // In addition to x/a, all traffic from x/b to Namespace x should also be denied. reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["x"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("x"), Dropped) reachability2.ExpectSelf(allPods, Connected) // New member in grp-svc-x-a should be reflected in grp-nested as well. cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "a"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], "test-add-pod-ns-x"), + Pod: NewPod(getNS("x"), "test-add-pod-ns-x"), Labels: map[string]string{"pod": "test-add-pod-ns-x"}, }, ExpectConnectivity: Dropped, @@ -1672,30 +1613,27 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { }, } testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{grpBuilder2.Get(), grpBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{grpBuilder2.Get(), grpBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } // In this testStep grp3 is created. Its members should be reflected in grp-nested // and as a result, all traffic from x/c to Namespace x should be denied as well.
reachability3 := NewReachability(allPods, Connected) - reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped) - reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["x"], Dropped) - reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["x"], Dropped) + reachability3.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped) + reachability3.ExpectEgressToNamespace(getPod("x", "b"), getNS("x"), Dropped) + reachability3.ExpectEgressToNamespace(getPod("x", "c"), getNS("x"), Dropped) reachability3.ExpectSelf(allPods, Connected) testStep3 := &TestStep{ - "Port 80 updated", - reachability3, - []metav1.Object{grpBuilder3.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability3, + TestResources: []metav1.Object{grpBuilder3.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -1712,45 +1650,43 @@ func testBaselineNamespaceIsolation(t *testing.T) { nsExpOtherThanX := metav1.LabelSelectorRequirement{ Key: "ns", Operator: metav1.LabelSelectorOpNotIn, - Values: []string{namespaces["x"]}, + Values: []string{getNS("x")}, } builder = builder.SetName("acnp-baseline-isolate-ns-x"). SetTier("baseline"). SetPriority(1.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false, - nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // create a K8s NetworkPolicy for Pods in namespace x to allow ingress traffic from Pods in the same namespace, // as well as from the y/a Pod. It should open up ingress from y/a since it's evaluated before the baseline tier. k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], "allow-ns-x-and-y-a"). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), "allow-ns-x-and-y-a"). SetTypeIngress(). AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, - nil, map[string]string{"ns": namespaces["x"]}, nil, nil). + nil, map[string]string{"ns": getNS("x")}, nil, nil). 
AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, - map[string]string{"pod": "a"}, map[string]string{"ns": namespaces["y"]}, nil, nil) + map[string]string{"pod": "a"}, map[string]string{"ns": getNS("y")}, nil, nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "a"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "b"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "b"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "b"), getNS("z"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "c"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "c"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "c"), getNS("z"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1758,7 +1694,7 @@ func testBaselineNamespaceIsolation(t *testing.T) { } executeTests(t, testCase) // Cleanup the K8s NetworkPolicy created for this test. - failOnError(k8sUtils.CleanNetworkPolicies(map[string]string{"x": namespaces["x"]}), t) + failOnError(k8sUtils.CleanNetworkPolicies(map[string]TestNamespaceMeta{"x": {Name: getNS("x")}}), t) time.Sleep(networkPolicyDelay) } @@ -1768,65 +1704,61 @@ func testACNPPriorityOverride(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-priority1"). SetPriority(1.001). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). SetPriority(1.002). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). SetPriority(1.003). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority. Drops traffic from z to x. - builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + 
reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies with different priorities", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies with different priorities", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // Create the Policies in specific order to make sure that priority re-assignments work as expected. testStepAll := []*TestStep{ { - "All three Policies", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1843,66 +1775,62 @@ func testACNPTierOverride(t *testing.T) { builder1 = builder1.SetName("acnp-tier-emergency"). SetTier("emergency"). SetPriority(100). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority tier. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). SetTier("securityops"). SetPriority(10). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). SetTier("application"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from z to x. 
- builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testStepAll := []*TestStep{ { - "All three Policies in different tiers", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies in different tiers", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1927,36 +1855,34 @@ 
func testACNPCustomTiers(t *testing.T) { builder1 = builder1.SetName("acnp-tier-high"). SetTier("high-priority"). SetPriority(100). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Higher priority tier. Allows traffic from z to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). SetTier("low-priority"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lower priority tier. Drops traffic from z to x. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder2.Get(), builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1975,32 +1901,30 @@ func testACNPPriorityConflictingRule(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-drop"). SetPriority(1).
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). SetPriority(2). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/b"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/c"), namespaces["x"], Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "a"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "b"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "c"), getNS("x"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2016,38 +1940,36 @@ func testACNPRulePriority(t *testing.T) { // acnp-deny will apply to all pods in namespace x builder1 = builder1.SetName("acnp-deny"). SetPriority(5). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-allow - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} // acnp-allow will also apply to all pods in namespace x builder2 = builder2.SetName("acnp-allow"). SetPriority(5). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) - // This rule should take no effect as it will be overridden by the first rule of cnp-deny - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + // This rule should take no effect as it will be overridden by the first rule of cnp-deny + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) // Only egress from pods in namespace x to namespace y should be denied reachabilityBothACNP := NewReachability(allPods, Connected) - reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/b"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/c"), namespaces["x"], Dropped) + reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "a"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "b"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "c"), getNS("x"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2062,23 +1984,21 @@ func testACNPPortRange(t *testing.T) { builder =
builder.SetName("acnp-deny-a-to-z-egress-port-range"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testSteps := []*TestStep{ { - fmt.Sprintf("ACNP Drop Ports 8080:8082"), - reachability, - []metav1.Object{builder.Get()}, - []int32{8080, 8081, 8082}, - ProtocolTCP, - 0, - nil, + Name: fmt.Sprintf("ACNP Drop Ports 8080:8082"), + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{8080, 8081, 8082}, + Protocol: ProtocolTCP, }, } @@ -2094,23 +2014,21 @@ func testACNPRejectEgress(t *testing.T) { builder = builder.SetName("acnp-reject-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Rejected) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Rejected) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Rejected) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Rejected) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Rejected) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2125,23 +2043,21 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) - reachability.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) - reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Rejected) - reachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Rejected) + reachability.ExpectIngressFromNamespace(getPod("x", "a"), getNS("z"), Rejected) + reachability.ExpectIngressFromNamespace(getPod("y", "a"), getNS("z"), Rejected) + reachability.Expect(getPod("z", "b"), getPod("z", "a"), Rejected) + reachability.Expect(getPod("z", "c"), getPod("z", "a"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -2183,10 +2099,10 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder1 = builder1.SetName("acnp-reject-egress-svc-traffic"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "agnhost-client"}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s1"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s2"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpEgress := builder1.Get() k8sUtils.CreateOrUpdateACNP(acnpEgress) @@ -2210,8 +2126,8 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder2 = builder2.SetName("acnp-reject-ingress-svc-traffic"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: svc1.Spec.Selector}, {PodSelector: svc2.Spec.Selector}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpIngress := builder2.Get() k8sUtils.CreateOrUpdateACNP(acnpIngress) @@ -2301,10 +2217,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-reject-ingress-double-dir"). SetPriority(1.0) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder1.Get(), testcases) @@ -2312,10 +2228,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-reject-egress-double-dir"). SetPriority(1.0) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder2.Get(), testcases) @@ -2324,10 +2240,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder3 = builder3.SetName("acnp-reject-server-double-dir"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder3.Get(), testcases) @@ -2336,10 +2252,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder4 = builder4.SetName("acnp-reject-client-double-dir"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) - builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder4.Get(), testcases) } @@ -2347,24 +2263,22 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser // testANNPPortRange tests the port range in a ANNP can work. func testANNPPortRange(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "annp-deny-yb-to-xc-egress-port-range"). + builder = builder.SetName(getNS("y"), "annp-deny-yb-to-xc-egress-port-range"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}) - builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "annp-port-range") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "c"), Dropped) var testSteps []*TestStep testSteps = append(testSteps, &TestStep{ - fmt.Sprintf("ANNP Drop Ports 8080:8082"), - reachability, - []metav1.Object{builder.Get()}, - []int32{8080, 8081, 8082}, - ProtocolTCP, - 0, - nil, + Name: "ANNP Drop Ports 8080:8082", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{8080, 8081, 8082}, + Protocol: ProtocolTCP, }) testCase := []*TestCase{ @@ -2377,40 +2291,36 @@ func testANNPPortRange(t *testing.T) { // that specifies that. Also it tests that a K8s NetworkPolicy with same appliedTo will not affect its behavior. func testANNPBasic(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-same-name"). + builder = builder.SetName(getNS("y"), "np-same-name"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // build a K8s NetworkPolicy that has the same appliedTo but allows all traffic. k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["y"], "np-same-name"). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("y"), "np-same-name"). SetPodSelector(map[string]string{"pod": "a"}) k8sNPBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil) testStep2 := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2424,14 +2334,14 @@ func testANNPBasic(t *testing.T) { // update on the Antrea NetworkPolicy allows traffic from X/B to Y/A on port 80. func testANNPUpdate(t *testing.T, data *TestData) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-before-update"). + builder = builder.SetName(getNS("y"), "np-before-update").
SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) annp, err := k8sUtils.CreateOrUpdateANNP(builder.Get()) failOnError(err, t) failOnError(data.waitForANNPRealized(t, annp.Namespace, annp.Name, policyRealizedTimeout), t) @@ -2443,10 +2353,10 @@ func testANNPUpdate(t *testing.T, data *TestData) { } updatedBuilder := &AntreaNetworkPolicySpecBuilder{} - updatedBuilder = updatedBuilder.SetName(namespaces["y"], "np-before-update"). + updatedBuilder = updatedBuilder.SetName(getNS("y"), "np-before-update"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - updatedBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + updatedBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "") updatedReachability := NewReachability(allPods, Connected) annp, err = k8sUtils.CreateOrUpdateANNP(updatedBuilder.Get()) @@ -2467,22 +2377,22 @@ func testANNPUpdate(t *testing.T, data *TestData) { func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { tempLabel := randName("temp-") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-multiple-appliedto").SetPriority(1.0) + builder = builder.SetName(getNS("y"), "np-multiple-appliedto").SetPriority(1.0) // Make it apply to an extra dummy AppliedTo to ensure it handles multiple AppliedToGroups correctly. // See https://github.com/antrea-io/antrea/issues/2083. 
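+ // For illustration, the per-rule variant below corresponds roughly to the following
+ // manifest (a sketch; the Namespace names are generated by the test framework and the
+ // temporary label is randomized):
+ //   apiVersion: crd.antrea.io/v1beta1
+ //   kind: NetworkPolicy
+ //   spec:
+ //     priority: 1
+ //     ingress:
+ //     - action: Drop
+ //       appliedTo:
+ //       - podSelector: {matchLabels: {pod: a}}
+ //       from:
+ //       - podSelector: {matchLabels: {pod: b}}
+ //         namespaceSelector: {matchLabels: {ns: <generated-x>}}
+ //     - action: Drop
+ //       appliedTo:
+ //       - podSelector: {matchLabels: {<temp-label>: ""}}
+ //       from: <same peer as the first rule>
+ // The singleRule variant lists both appliedTo entries at the spec level instead.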
if singleRule { builder.SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}, {PodSelector: map[string]string{tempLabel: ""}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") } else { - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}, crdv1beta1.RuleActionDrop, "", "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{tempLabel: ""}}}, crdv1beta1.RuleActionDrop, "", "") } reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) annp, err := k8sUtils.CreateOrUpdateANNP(builder.Get()) failOnError(err, t) @@ -2495,7 +2405,7 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { } t.Logf("Making the Policy apply to y/c by labeling it with the temporary label that matches the dummy AppliedTo") - podYC, err := k8sUtils.GetPodByLabel(namespaces["y"], "c") + podYC, err := k8sUtils.GetPodByLabel(getNS("y"), "c") if err != nil { t.Errorf("Failed to get Pod in Namespace y with label 'pod=c': %v", err) } @@ -2503,8 +2413,8 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { podYC, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/c"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "c"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -2518,7 +2428,7 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { _, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -2621,9 +2531,9 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = 
builder.SetName(npName). SetPriority(1.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) builder.AddEgressLogging(logLabel) npRef := fmt.Sprintf("AntreaClusterNetworkPolicy:%s", npName) @@ -2631,7 +2541,7 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { failOnError(err, t) failOnError(data.waitForACNPRealized(t, acnp.Name, policyRealizedTimeout), t) - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } @@ -2649,9 +2559,9 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP, nil, nil) }() } - oneProbe(namespaces["x"], "a", namespaces["z"], "a") - oneProbe(namespaces["x"], "a", namespaces["z"], "b") - oneProbe(namespaces["x"], "a", namespaces["z"], "c") + oneProbe(getNS("x"), "a", getNS("z"), "a") + oneProbe(getNS("x"), "a", getNS("z"), "b") + oneProbe(getNS("x"), "a", getNS("z"), "c") wg.Wait() // nodeName is guaranteed to be set at this stage, since the framework waits for all Pods to be in Running phase @@ -2664,23 +2574,23 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { // testAuditLoggingEnableK8s tests that audit logs are generated when K8s NP is applied // tests both Allow traffic by K8s NP and Drop traffic by implicit K8s policy drop func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { - failOnError(data.updateNamespaceWithAnnotations(namespaces["x"], map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) + failOnError(data.updateNamespaceWithAnnotations(getNS("x"), map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) // Add a K8s namespaced NetworkPolicy in ns x that allow ingress traffic from // Pod x/b to x/a which default denies other ingress including from Pod x/c to x/a npName := "allow-x-b-to-x-a" k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], npName). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), npName). SetPodSelector(map[string]string{"pod": "a"}). SetTypeIngress(). 
AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, nil, nil, nil) - npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", namespaces["x"], npName) + npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", getNS("x"), npName) knp, err := k8sUtils.CreateOrUpdateNetworkPolicy(k8sNPBuilder.Get()) failOnError(err, t) failOnError(waitForResourceReady(t, timeout, knp), t) - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } @@ -2702,16 +2612,16 @@ func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP, nil, nil) }() } - oneProbe(namespaces["x"], "b", namespaces["x"], "a", matcher1) - oneProbe(namespaces["x"], "c", namespaces["x"], "a", matcher2) + oneProbe(getNS("x"), "b", getNS("x"), "a", matcher1) + oneProbe(getNS("x"), "c", getNS("x"), "a", matcher2) wg.Wait() // nodeName is guaranteed to be set at this stage, since the framework waits for all Pods to be in Running phase nodeName := podXA.Spec.NodeName checkAuditLoggingResult(t, data, nodeName, "K8sNetworkPolicy", append(matcher1.Matchers(), matcher2.Matchers()...)) - failOnError(k8sUtils.DeleteNetworkPolicy(namespaces["x"], "allow-x-b-to-x-a"), t) - failOnError(data.UpdateNamespace(namespaces["x"], func(namespace *v1.Namespace) { + failOnError(k8sUtils.DeleteNetworkPolicy(getNS("x"), "allow-x-b-to-x-a"), t) + failOnError(data.UpdateNamespace(getNS("x"), func(namespace *v1.Namespace) { delete(namespace.Annotations, networkpolicy.EnableNPLoggingAnnotationKey) }), t) } @@ -2719,23 +2629,23 @@ func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { // testAuditLoggingK8sService tests that audit logs are generated for K8s Service access // tests both Allow traffic by K8s NP and Drop traffic by implicit K8s policy drop func testAuditLoggingK8sService(t *testing.T, data *TestData) { - failOnError(data.updateNamespaceWithAnnotations(namespaces["x"], map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) + failOnError(data.updateNamespaceWithAnnotations(getNS("x"), map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) // Create and expose nginx service on the same node as pod x/a - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } serverNode := podXA.Spec.NodeName serviceName := "nginx" - serverPodName, serverIP, nginxCleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, namespaces["x"], false) + serverPodName, serverIP, nginxCleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, getNS("x"), false) defer nginxCleanupFunc() serverPort := int32(80) ipFamily := v1.IPv4Protocol - if IPFamily(podIPs[namespaces["x"]+"/a"][0]) == "v6" { + if IPFamily(podIPs[getPodName("x", "a")][0]) == "v6" { ipFamily = v1.IPv6Protocol } - service, err := data.CreateService(serviceName, namespaces["x"], serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, v1.ServiceTypeClusterIP, &ipFamily) + service, err := data.CreateService(serviceName, getNS("x"), serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, v1.ServiceTypeClusterIP, &ipFamily) if err != nil { t.Fatalf("Error when creating nginx service: %v", err) } @@ -2745,12 
+2655,12 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) { // Pod x/a to service nginx which default denies other ingress including from Pod x/b to service nginx npName := "allow-xa-to-service" k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], npName). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), npName). SetPodSelector(map[string]string{"app": serviceName}). SetTypeIngress(). AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "a"}, nil, nil, nil) - npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", namespaces["x"], npName) + npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", getNS("x"), npName) knp, err := k8sUtils.CreateOrUpdateNetworkPolicy(k8sNPBuilder.Get()) failOnError(err, t) @@ -2761,7 +2671,7 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) { // matcher2 is for connections dropped by the isolated behavior of the K8s NP matcher2 := NewAuditLogMatcher("K8sNetworkPolicy", "", "Ingress", "Drop") - appliedToRef := fmt.Sprintf("%s/%s", namespaces["x"], serverPodName) + appliedToRef := fmt.Sprintf("%s/%s", getNS("x"), serverPodName) // generate some traffic that wget the nginx service var wg sync.WaitGroup @@ -2776,40 +2686,38 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) { }() } } - oneProbe(namespaces["x"], "a", matcher1) - oneProbe(namespaces["x"], "b", matcher2) + oneProbe(getNS("x"), "a", matcher1) + oneProbe(getNS("x"), "b", matcher2) wg.Wait() checkAuditLoggingResult(t, data, serverNode, "K8sNetworkPolicy", append(matcher1.Matchers(), matcher2.Matchers()...)) - failOnError(k8sUtils.DeleteNetworkPolicy(namespaces["x"], npName), t) - failOnError(data.UpdateNamespace(namespaces["x"], func(namespace *v1.Namespace) { + failOnError(k8sUtils.DeleteNetworkPolicy(getNS("x"), npName), t) + failOnError(data.UpdateNamespace(getNS("x"), func(namespace *v1.Namespace) { delete(namespace.Annotations, networkpolicy.EnableNPLoggingAnnotationKey) }), t) } func testAppliedToPerRule(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np1").SetPriority(1.0) + builder = builder.SetName(getNS("y"), "np1").SetPriority(1.0) annpATGrp1 := ANNPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} annpATGrp2 := ANNPAppliedToSpec{PodSelector: map[string]string{"pod": "b"}, PodSelectorMatchExp: nil} - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{annpATGrp1}, crdv1beta1.RuleActionDrop, "", "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, nil, nil, nil, nil, []ANNPAppliedToSpec{annpATGrp2}, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("z", "b"), getPod("y", 
"b"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } @@ -2817,30 +2725,27 @@ func testAppliedToPerRule(t *testing.T) { builder2 = builder2.SetName("cnp1").SetPriority(1.0) cnpATGrp1 := ACNPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} cnpATGrp2 := ACNPAppliedToSpec{ - PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": namespaces["y"]}, + PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": getNS("y")}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("z", "a"), Dropped) + reachability2.Expect(getPod("z", "b"), getPod("y", "b"), Dropped) testStep2 := []*TestStep{ { - "Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } - testCase := []*TestCase{ {"ANNP AppliedTo per rule", testStep}, {"ACNP AppliedTo per rule", testStep2}, @@ -2849,47 +2754,45 @@ func testAppliedToPerRule(t *testing.T) { } func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["y"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("y"), 80, 80, map[string]string{"app": "b"}, nil) cg1Name, cg2Name := "cg-svc1", "cg-svc2" cgBuilder1 := &ClusterGroupSpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], 
"svc1") + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(getNS("x"), "svc1") cgBuilder2 := &ClusterGroupSpecBuilder{} - cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference(namespaces["y"], "svc2") + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference(getNS("y"), "svc2") builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, cg2Name, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cg2Name, "", nil) // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y. reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{svc1, svc2, cgBuilder1.Get(), cgBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, cgBuilder1.Get(), cgBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update selector of Service referred in cg-svc1, and update serviceReference of cg-svc2. - svc1Updated := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) - svc3 := k8sUtils.BuildService("svc3", namespaces["y"], 80, 80, map[string]string{"app": "a"}, nil) + svc1Updated := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) + svc3 := k8sUtils.BuildService("svc3", getNS("y"), 80, 80, map[string]string{"app": "a"}, nil) svc1PodName := randName("test-pod-svc1-") svc3PodName := randName("test-pod-svc3-") - cgBuilder2Updated := cgBuilder2.SetServiceReference(namespaces["y"], "svc3") + cgBuilder2Updated := cgBuilder2.SetServiceReference(getNS("y"), "svc3") cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["y"], svc3PodName), + Pod: NewPod(getNS("y"), svc3PodName), Labels: map[string]string{"pod": svc3PodName, "app": "a"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "b"}, }, ExpectConnectivity: Dropped, @@ -2899,32 +2802,29 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) // Pods backing svc1 (label pod=b) in namespace x should not allow ingress from Pods backing svc3 (label pod=a) in namespace y. 
reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) + reachability2.Expect(getPod("y", "a"), getPod("x", "b"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{svc1Updated, svc3, cgBuilder1.Get(), cgBuilder2Updated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{svc1Updated, svc3, cgBuilder1.Get(), cgBuilder2Updated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) - builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. testStep3 := &TestStep{ - "Port 80 ACNP spec updated to selector", - reachability, - []metav1.Object{builderUpdated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 ACNP spec updated to selector", + Reachability: reachability, + TestResources: []metav1.Object{builderUpdated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -2935,18 +2835,18 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) } func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) svc1PodName := randName("test-pod-svc1-") cg1Name, cg2Name, cg3Name := "cg-svc-x-a", "cg-select-y-b", "cg-select-y-c" cgBuilder1 := &ClusterGroupSpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], "svc1") + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(getNS("x"), "svc1") cgBuilder2 := &ClusterGroupSpecBuilder{} cgBuilder2 = cgBuilder2.SetName(cg2Name). - SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil). SetPodSelector(map[string]string{"pod": "b"}, nil) cgBuilder3 := &ClusterGroupSpecBuilder{} cgBuilder3 = cgBuilder3.SetName(cg3Name). - SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil). SetPodSelector(map[string]string{"pod": "c"}, nil) cgNestedName := "cg-nested" cgBuilderNested := &ClusterGroupSpecBuilder{} @@ -2954,42 +2854,40 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}}}). - AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("z")}}}). + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) // Pods in Namespace z should not allow traffic from Pods backing svc1 (label pod=a) in Namespace x. // Note that in this testStep cg3 will not be created yet, so even though cg-nested selects cg1 and // cg3 as childGroups, only members of cg1 will be included at this time. reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), svc1, cgBuilder1.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), svc1, cgBuilder1.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update "cg-nested" to include "cg-select-y-b" as well. cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg1Name, cg2Name, cg3Name}) // In addition to x/a, all traffic from y/b to Namespace z should also be denied. reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("y", "b"), getNS("z"), Dropped) // New member in cg-svc-x-a should be reflected in cg-nested as well. cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "a"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["z"], "test-add-pod-ns-z"), + Pod: NewPod(getNS("z"), "test-add-pod-ns-z"), Labels: map[string]string{"pod": "test-add-pod-ns-z"}, }, ExpectConnectivity: Dropped, @@ -2997,29 +2895,26 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { }, } testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } // In this testStep cg3 is created. Its members should be reflected in cg-nested // and as a result, all traffic from y/c to Namespace z should be denied as well.
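+ // For reference, cg-nested computes the union of its childGroups, roughly (a sketch):
+ //   kind: ClusterGroup
+ //   metadata:
+ //     name: cg-nested
+ //   spec:
+ //     childGroups: [cg-svc-x-a, cg-select-y-b, cg-select-y-c]
+ // A child group that does not exist yet contributes no members until it is created,
+ // which is exactly what the three steps of this test exercise.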
reachability3 := NewReachability(allPods, Connected) - reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped) - reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped) + reachability3.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability3.ExpectEgressToNamespace(getPod("y", "b"), getNS("z"), Dropped) + reachability3.ExpectEgressToNamespace(getPod("y", "c"), getNS("z"), Dropped) testStep3 := &TestStep{ - "Port 80 updated", - reachability3, - []metav1.Object{cgBuilder3.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability3, + TestResources: []metav1.Object{cgBuilder3.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -3030,8 +2925,8 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { } func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { - podXAIP, _ := podIPs[namespaces["x"]+"/a"] - podXBIP, _ := podIPs[namespaces["x"]+"/b"] + podXAIP, _ := podIPs[getPodName("x", "a")] + podXBIP, _ := podIPs[getPodName("x", "b")] genCIDR := func(ip string) string { switch IPFamily(ip) { case "v4": @@ -3062,43 +2957,39 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "a"}, - NSSelector: map[string]string{"ns": namespaces["y"]}, + NSSelector: map[string]string{"ns": getNS("y")}, }, }) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) testStep := &TestStep{ - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } cgBuilder3 := &ClusterGroupSpecBuilder{} cgBuilder3 = cgBuilder3.SetName(cg3Name). - SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("x")}, nil). 
SetPodSelector(map[string]string{"pod": "c"}, nil) updatedCGParent := &ClusterGroupSpecBuilder{} updatedCGParent = updatedCGParent.SetName(cgParentName).SetChildGroups([]string{cg1Name, cg3Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) + reachability2.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability2.Expect(getPod("x", "c"), getPod("y", "a"), Dropped) testStep2 := &TestStep{ - "Port 80, updated", - reachability2, - []metav1.Object{cgBuilder3.Get(), updatedCGParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80, updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder3.Get(), updatedCGParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -3114,21 +3005,19 @@ func testACNPNamespaceIsolation(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) // deny ingress traffic except from own namespace, which is always allowed. - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, selfNamespace, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Dropped) reachability.ExpectAllSelfNamespace(Connected) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -3136,25 +3025,23 @@ func testACNPNamespaceIsolation(t *testing.T) { SetTier("baseline"). 
SetPriority(1.0) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionAllow, "", "", nil) + selfNamespace, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionAllow, "", "", nil) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, - false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop, "", "", nil) + nil, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("y"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("z"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("y"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("z"), Dropped) testStep2 := &TestStep{ - "Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -3171,51 +3058,164 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, nil, crdv1beta1.RuleActionPass, "", "", nil) + selfNamespace, nil, crdv1beta1.RuleActionPass, "", "", nil) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s // NetworkPolicies to regulate intra-Namespace traffic) reachability := NewReachability(allPods, Dropped) reachability.ExpectAllSelfNamespace(Connected) testStep1 := &TestStep{ - "Namespace isolation, Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Namespace isolation, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Add a K8s namespaced NetworkPolicy in ns x that isolates all Pods in that namespace. 
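+ // Because the ACNP above uses Pass rather than Allow for intra-Namespace traffic, a
+ // Namespace owner can still restrict that traffic with a plain K8s NetworkPolicy,
+ // which is what this step verifies. The rule pair is roughly (a sketch):
+ //   ingress:
+ //   - action: Pass
+ //     from:
+ //     - namespaces:
+ //         match: Self
+ //   - action: Drop
+ //     from:
+ //     - namespaceSelector: {}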
builder2 := &NetworkPolicySpecBuilder{} - builder2 = builder2.SetName(namespaces["x"], "default-deny-in-namespace-x") + builder2 = builder2.SetName(getNS("x"), "default-deny-in-namespace-x") builder2.SetTypeIngress() reachability2 := NewReachability(allPods, Dropped) reachability2.ExpectAllSelfNamespace(Connected) - reachability2.ExpectSelfNamespace(namespaces["x"], Dropped) + reachability2.ExpectSelfNamespace(getNS("x"), Dropped) reachability2.ExpectSelf(allPods, Connected) testStep2 := &TestStep{ - "Namespace isolation with K8s NP, Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Namespace isolation with K8s NP, Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ - {"ACNP strict Namespace isolation for all namespaces", []*TestStep{testStep1, testStep2}}, + {"ACNP strict Namespace isolation for all Namespaces", []*TestStep{testStep1, testStep2}}, } executeTests(t, testCase) } +func testACNPStrictNamespacesIsolationByLabels(t *testing.T) { + samePurposeTierLabels := &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"purpose", "tier"}, + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("test-acnp-strict-ns-isolation-by-labels"). + SetTier("securityops"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + samePurposeTierLabels, nil, crdv1beta1.RuleActionPass, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + // prod1 and prod2 Namespaces should be able to connect to each other. The same goes for dev1 and + // dev2 Namespaces. However, any prod Namespace should not be able to connect to any dev Namespace + // due to different "tier" label values. For the "no-tier" Namespace, the first ingress rule will + // have no effect because the Namespace does not have a "tier" label. So every Pod in that Namespace + // will be isolated according to the second rule of the ACNP. 
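+ // The first rule above uses the new namespaces.sameLabels peer; as a manifest it is
+ // roughly (a sketch):
+ //   ingress:
+ //   - action: Pass
+ //     from:
+ //     - namespaces:
+ //         sameLabels: [purpose, tier]
+ // It matches only Namespaces whose "purpose" and "tier" label values both equal those
+ // of the appliedTo Namespace; a Namespace missing either label key never matches.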
+ reachability := NewReachability(allPods, Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod1"), getNS("prod2"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod1"), getNS("prod2"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod2"), getNS("prod1"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod2"), getNS("prod1"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev1"), getNS("dev2"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev1"), getNS("dev2"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev2"), getNS("dev1"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev2"), getNS("dev1"), Connected) + reachability.ExpectAllSelfNamespace(Connected) + reachability.ExpectSelfNamespace(getNS("no-tier"), Dropped) + reachability.ExpectSelf(allPods, Connected) + + testStep := &TestStep{ + Name: "Namespace isolation by label, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + } + testCase := []*TestCase{ + {"ACNP strict Namespace isolation by Namespace purpose and tier labels", []*TestStep{testStep}}, + } + executeTests(t, testCase) +} + +func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T, data *TestData) { + samePurposeLabel := &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"purpose"}, + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("test-acnp-strict-ns-isolation-by-single-purpose-label"). + SetTier("securityops"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + samePurposeLabel, nil, crdv1beta1.RuleActionPass, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + // Namespaces are split into two logical groups, purpose=test (prod1,2 and dev1,2) and purpose=test-exclusion + // (no-tier). The two groups of Namespaces should not be able to connect to each other.
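+ // With a single key, sameLabels partitions Namespaces by that label's value, i.e.
+ // roughly (a sketch):
+ //   - namespaces:
+ //       sameLabels: [purpose]
+ // Here that yields purpose=test (prod1/2 and dev1/2) versus purpose=test-exclusion
+ // (no-tier), and the reachability below encodes that the two partitions cannot reach
+ // each other in either direction.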
+ reachability := NewReachability(allPods, Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev2"), getNS("no-tier"), Dropped) + + testStep := &TestStep{ + Name: "Namespace isolation by single label, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + } + + labelNoTierNS := func() { + nsReturned, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), getNS("no-tier"), metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get the Namespace that has no tier label") + } + nsReturned.Labels = map[string]string{ + "purpose": "test", + } + log.Infof("Updating no-tier Namespace purpose label") + if _, err = data.clientset.CoreV1().Namespaces().Update(context.TODO(), nsReturned, metav1.UpdateOptions{}); err != nil { + t.Errorf("failed to update the no-tier Namespace with purpose=test label") + } + } + revertLabel := func() { + nsReturned, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), getNS("no-tier"), metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get the no-tier Namespace") + } + nsReturned.Labels = map[string]string{ + "purpose": "test-exclusion", + } + if _, err = data.clientset.CoreV1().Namespaces().Update(context.TODO(), nsReturned, metav1.UpdateOptions{}); err != nil { + t.Errorf("failed to revert the purpose label for the no-tier Namespace") + } + } + newReachability := NewReachability(allPods, Connected) + testStep2 := &TestStep{ + Name: "Namespace isolation after Namespace label update, Port 80", + Reachability: newReachability, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomSetup: labelNoTierNS, + CustomTeardown: revertLabel, + } + testCase := []*TestCase{ + {"ACNP strict Namespace isolation by Namespace purpose label", []*TestStep{testStep, testStep2}}, + } + executeTestsWithData(t, testCase, data) +} + func testFQDNPolicy(t *testing.T) { // The ipv6-only test env doesn't have IPv6 access to the web. skipIfNotIPv4Cluster(t) - // It is convenient to have higher log verbosity for FQDNtests for troubleshooting failures. + // It is convenient to have higher log verbosity for FQDN tests for troubleshooting failures. logLevel := log.GetLevel() log.SetLevel(log.TraceLevel) defer log.SetLevel(logLevel) @@ -3240,31 +3240,31 @@ func testFQDNPolicy(t *testing.T) { // All client Pods below are randomly chosen from test Namespaces.
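+ // The FQDN rules under test are built earlier in this function (outside this hunk).
+ // For orientation, an FQDN egress rule takes roughly this shape (illustrative only;
+ // the exact patterns and actions tested may differ):
+ //   egress:
+ //   - action: Reject
+ //     to:
+ //     - fqdn: "*github.com"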
testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + getPod("x", "a"), "docs.github.com", 80, Rejected, }, { - Pod(namespaces["x"] + "/b"), + getPod("x", "b"), "api.github.com", 80, Rejected, }, { - Pod(namespaces["y"] + "/a"), + getPod("y", "a"), "wayfair.com", 80, Dropped, }, { - Pod(namespaces["y"] + "/b"), + getPod("y", "b"), "stackoverflow.com", 80, Dropped, }, { - Pod(namespaces["z"] + "/a"), + getPod("z", "a"), "facebook.com", 80, Connected, @@ -3292,7 +3292,7 @@ func testFQDNPolicy(t *testing.T) { // policies, to avoid having a dependency on external connectivity. The reason we // use headless Service is that FQDN will use the IP from DNS A/AAAA records to // implement flows in the egress policy table. For a non-headless Service, the DNS // name resolves to the ClusterIP for the Service. But when traffic arrives to the // egress table, the dstIP has already been DNATed to the Endpoints IP by // AntreaProxy Service Load-Balancing, and the policies are not enforced correctly. // For a headless Service, the Endpoints IP will be directly returned by the DNS @@ -3303,13 +3303,13 @@ func testFQDNPolicyInClusterService(t *testing.T) { defer log.SetLevel(logLevel) var services []*v1.Service if clusterInfo.podV4NetworkCIDR != "" { - ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil) + ipv4Svc := k8sUtils.BuildService("ipv4-svc", getNS("x"), 80, 80, map[string]string{"pod": "a"}, nil) ipv4Svc.Spec.ClusterIP = "None" ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol} services = append(services, ipv4Svc) } if clusterInfo.podV6NetworkCIDR != "" { - ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "b"}, nil) + ipv6Svc := k8sUtils.BuildService("ipv6-svc", getNS("x"), 80, 80, map[string]string{"pod": "b"}, nil) ipv6Svc.Spec.ClusterIP = "None" ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol} services = append(services, ipv6Svc) @@ -3329,8 +3329,8 @@ func testFQDNPolicyInClusterService(t *testing.T) { SetTier("application"). SetPriority(1.0) for idx, service := range services { - builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}, PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionReject) - builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}, PodSelector: map[string]string{"pod": "c"}}}, crdv1beta1.RuleActionDrop) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("y")}, PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionReject) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("z")}, PodSelector: map[string]string{"pod": "c"}}}, crdv1beta1.RuleActionDrop) } acnp := builder.Get() k8sUtils.CreateOrUpdateACNP(acnp) @@ -3340,8 +3340,8 @@ func testFQDNPolicyInClusterService(t *testing.T) { for _, service := range services { eachServiceCases := []podToAddrTestStep{ { - Pod(namespaces["y"] + "/b"), + getPod("y", "b"), // To indicate the server name is a FQDN, end it with a dot.
Then DNS resolver won't attempt to append // domain names (e.g. svc.cluster.local, cluster.local) when resolving it, making it get resolution // result more quickly. svcDNSName(service) + ".", @@ -3349,13 +3349,13 @@ Rejected, }, { - Pod(namespaces["z"] + "/c"), + getPod("z", "c"), svcDNSName(service) + ".", 80, Dropped, }, { - Pod(namespaces["x"] + "/c"), + getPod("x", "c"), svcDNSName(service) + ".", 80, Connected, @@ -3386,7 +3386,7 @@ func testFQDNPolicyInClusterService(t *testing.T) { func testFQDNPolicyTCP(t *testing.T) { // The ipv6-only test env doesn't have IPv6 access to the web. skipIfNotIPv4Cluster(t) - // It is convenient to have higher log verbosity for FQDNtests for troubleshooting failures. + // It is convenient to have higher log verbosity for FQDN tests for troubleshooting failures. logLevel := log.GetLevel() log.SetLevel(log.TraceLevel) defer log.SetLevel(logLevel) @@ -3398,7 +3398,7 @@ func testFQDNPolicyTCP(t *testing.T) { builder.AddFQDNRule("github.com", ProtocolTCP, nil, nil, nil, "", nil, crdv1beta1.RuleActionDrop) testcases := []podToAddrTestStep{ { - Pod(namespaces["y"] + "/a"), + getPod("y", "a"), "github.com", 80, Dropped, @@ -3431,12 +3431,12 @@ func testToServices(t *testing.T, data *TestData) { skipIfProxyDisabled(t, data) var services []*v1.Service if clusterInfo.podV4NetworkCIDR != "" { - ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 81, 81, map[string]string{"pod": "a"}, nil) + ipv4Svc := k8sUtils.BuildService("ipv4-svc", getNS("x"), 81, 81, map[string]string{"pod": "a"}, nil) ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol} services = append(services, ipv4Svc) } if clusterInfo.podV6NetworkCIDR != "" { - ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil) + ipv6Svc := k8sUtils.BuildService("ipv6-svc", getNS("x"), 80, 80, map[string]string{"pod": "a"}, nil) ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol} services = append(services, ipv6Svc) } @@ -3457,8 +3457,8 @@ func testToServices(t *testing.T, data *TestData) { builder = builder.SetName("test-acnp-to-services"). SetTier("application").
SetPriority(1.0) - builder.AddToServicesRule(svcRefs, "x-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop) - builder.AddToServicesRule(svcRefs, "y-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}}}, crdv1beta1.RuleActionDrop) + builder.AddToServicesRule(svcRefs, "x-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop) + builder.AddToServicesRule(svcRefs, "y-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("y")}}}, crdv1beta1.RuleActionDrop) time.Sleep(networkPolicyDelay) acnp := builder.Get() @@ -3469,19 +3469,19 @@ func testToServices(t *testing.T, data *TestData) { for _, service := range builtSvcs { eachServiceCases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + getPod("x", "a"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Dropped, }, { - Pod(namespaces["y"] + "/b"), + getPod("y", "b"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Dropped, }, { - Pod(namespaces["z"] + "/c"), + getPod("z", "c"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Connected, @@ -3509,21 +3509,21 @@ func testToServices(t *testing.T, data *TestData) { } func testServiceAccountSelector(t *testing.T, data *TestData) { - k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", namespaces["x"], nil)) - defer k8sUtils.DeleteServiceAccount(namespaces["x"], "test-sa") + k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", getNS("x"), nil)) + defer k8sUtils.DeleteServiceAccount(getNS("x"), "test-sa") serverName, serverIP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", controlPlaneNodeName(), data.testNamespace, false) defer cleanupFunc() - client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), namespaces["x"], false, "test-sa") + client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), getNS("x"), false, "test-sa") defer cleanupFunc() - client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), namespaces["x"], false, "default") + client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), getNS("x"), false, "default") defer cleanupFunc() sa := &crdv1beta1.NamespacedName{ Name: "test-sa", - Namespace: namespaces["x"], + Namespace: getNS("x"), } builder := &ClusterNetworkPolicySpecBuilder{} @@ -3531,7 +3531,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": serverName}}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", sa) + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", sa) acnp := builder.Get() _, err := k8sUtils.CreateOrUpdateACNP(acnp) @@ -3544,13 +3544,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/" + client0Name), + getPod("x", client0Name), serverIP.IPv4.String(), 80, Dropped, }, { - Pod(namespaces["x"] + "/" + client1Name), + getPod("x", client1Name), serverIP.IPv4.String(), 80, Connected, @@ -3562,13 +3562,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/" + client0Name), + getPod("x", client0Name), serverIP.IPv6.String(), 80, Dropped, }, { - Pod(namespaces["x"] + "/" + client1Name), + getPod("x", client1Name), serverIP.IPv6.String(), 80, Connected, @@ -3597,20 +3597,20 @@ func testACNPNodeSelectorEgress(t *testing.T) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p6443, "egress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}, PodSelector: map[string]string{"pod": "a"}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}, PodSelector: map[string]string{"pod": "a"}}}, crdv1beta1.RuleActionDrop, true) var testcases []podToAddrTestStep if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + getPod("x", "a"), controlPlaneNodeIPv4(), 6443, Dropped, }, { - Pod(namespaces["x"] + "/b"), + getPod("x", "b"), controlPlaneNodeIPv4(), 6443, Connected, @@ -3622,13 +3622,13 @@ func testACNPNodeSelectorEgress(t *testing.T) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + getPod("x", "a"), controlPlaneNodeIPv6(), 6443, Dropped, }, { - Pod(namespaces["x"] + "/b"), + getPod("x", "b"), controlPlaneNodeIPv6(), 6443, Connected, @@ -3655,16 +3655,16 @@ func testACNPNodeSelectorEgress(t *testing.T) { } func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { - _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), namespaces["x"], false) + _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), getNS("x"), false) defer cleanupFunc() - _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), namespaces["y"], false) + _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), getNS("y"), false) defer cleanupFunc() clientName := "agnhost-client" - require.NoError(t, data.createAgnhostPodOnNode(clientName, namespaces["z"], controlPlaneNodeName(), true)) - defer data.DeletePodAndWait(defaultTimeout, clientName, namespaces["z"]) - _, err := data.podWaitForIPs(defaultTimeout, clientName, namespaces["z"]) + require.NoError(t, data.createAgnhostPodOnNode(clientName, getNS("z"), controlPlaneNodeName(), true)) + defer data.DeletePodAndWait(defaultTimeout, clientName, 
getNS("z")) + _, err := data.podWaitForIPs(defaultTimeout, clientName, getNS("z")) require.NoError(t, err) builder := &ClusterNetworkPolicySpecBuilder{} @@ -3672,20 +3672,20 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p80, "ingress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop, false) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { ipv4TestCases := []podToAddrTestStep{ { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP0.IPv4.String(), 80, Dropped, }, { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP1.IPv4.String(), 80, Connected, @@ -3696,13 +3696,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6TestCases := []podToAddrTestStep{ { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP0.IPv6.String(), 80, Dropped, }, { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP1.IPv6.String(), 80, Connected, @@ -3745,9 +3745,9 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { builder = builder.SetName("test-acnp-icmp"). SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { @@ -3848,7 +3848,7 @@ func testACNPNodePortServiceSupport(t *testing.T, data *TestData, serverNamespac }, }) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnp, err := k8sUtils.CreateOrUpdateACNP(builder.Get()) failOnError(err, t) @@ -3940,7 +3940,7 @@ func testACNPIGMPQuery(t *testing.T, data *TestData, acnpName, caseName, groupAd // create acnp with ingress rule for IGMP query igmpType := crdv1beta1.IGMPQuery builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil, nil, - nil, nil, nil, false, nil, action, "", "", nil) + nil, nil, nil, nil, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) defer data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Delete(context.TODO(), acnp.Name, metav1.DeleteOptions{}) @@ -4021,7 +4021,7 @@ func testACNPMulticastEgress(t *testing.T, data *TestData, acnpName, caseName, g SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": label}}}) cidr := mc.group.String() + "/32" builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, - 
nil, nil, nil, false, nil, action, "", "", nil) + nil, nil, nil, nil, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) if err != nil { @@ -4122,7 +4122,9 @@ func executeTestsWithData(t *testing.T, testList []*TestCase, data *TestData) { for _, step := range testCase.Steps { log.Infof("running step %s of test case %s", step.Name, testCase.Name) applyTestStepResources(t, step) - + if step.CustomSetup != nil { + step.CustomSetup() + } reachability := step.Reachability if reachability != nil { start := time.Now() @@ -4142,6 +4144,9 @@ func executeTestsWithData(t *testing.T, testList []*TestCase, data *TestData) { for _, p := range step.CustomProbes { doProbe(t, data, p, step.Protocol) } + if step.CustomTeardown != nil { + step.CustomTeardown() + } } log.Debug("Cleaning-up all policies and groups created by this Testcase") cleanupTestCaseResources(t, testCase) @@ -4315,7 +4320,7 @@ func waitForResourcesReady(t *testing.T, timeout time.Duration, objs ...metav1.O } // TestAntreaPolicy is the top-level test which contains all subtests for -// AntreaPolicy related test cases so they can share setup, teardown. +// AntreaPolicy related test cases so that they can share setup and teardown. func TestAntreaPolicy(t *testing.T) { skipIfHasWindowsNodes(t) skipIfAntreaPolicyDisabled(t) @@ -4326,7 +4331,7 @@ func TestAntreaPolicy(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, nil) // This test group only provides one case for each CR, including ACNP, ANNP, Tier, // ClusterGroup and Group to make sure the corresponding validation webhooks is @@ -4457,6 +4462,54 @@ func testMulticastNP(t *testing.T, data *TestData, testNamespace string) { t.Run("Case=MulticastNPPolicyEgressDrop", func(t *testing.T) { testACNPMulticastEgressDrop(t, data, testNamespace) }) } +func TestAntreaPolicyExtendedNamespaces(t *testing.T) { + skipIfHasWindowsNodes(t) + skipIfAntreaPolicyDisabled(t) + + data, err := setupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer teardownTest(t, data) + + extendedNamespaces := make(map[string]TestNamespaceMeta) + suffix := randName("") + // two "prod" Namespaces labeled purpose=test and tier=prod. + // two "dev" Namespaces labeled purpose=test and tier=dev. + // one "no-tier" Namespace labeled purpose=test-exclusion. 
+ for i := 1; i <= 2; i++ { + prodNS := TestNamespaceMeta{ + Name: "prod" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "prod", + }, + } + extendedNamespaces["prod"+strconv.Itoa(i)] = prodNS + devNS := TestNamespaceMeta{ + Name: "dev" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "dev", + }, + } + extendedNamespaces["dev"+strconv.Itoa(i)] = devNS + } + extendedNamespaces["no-tier"] = TestNamespaceMeta{ + Name: "no-tier-" + suffix, + Labels: map[string]string{ + "purpose": "test-exclusion", + }, + } + initialize(t, data, extendedNamespaces) + + t.Run("TestGroupACNPNamespaceLabelSelections", func(t *testing.T) { + t.Run("Case=ACNPStrictNamespacesIsolationByLabels", func(t *testing.T) { testACNPStrictNamespacesIsolationByLabels(t) }) + t.Run("Case=ACNPStrictNamespacesIsolationBySingleLabel", func(t *testing.T) { testACNPStrictNamespacesIsolationBySingleLabel(t, data) }) + }) + k8sUtils.Cleanup(namespaces) +} + func TestAntreaPolicyStatus(t *testing.T) { skipIfHasWindowsNodes(t) skipIfAntreaPolicyDisabled(t) @@ -4476,7 +4529,7 @@ func TestAntreaPolicyStatus(t *testing.T) { annpBuilder = annpBuilder.SetName(data.testNamespace, "annp-applied-to-two-nodes"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "") annp := annpBuilder.Get() log.Debugf("creating ANNP %v", annp.Name) @@ -4488,8 +4541,8 @@ func TestAntreaPolicyStatus(t *testing.T) { acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := acnpBuilder.Get() log.Debugf("creating ACNP %v", acnp.Name) _, err = data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Create(context.TODO(), acnp, metav1.CreateOptions{}) @@ -4525,9 +4578,9 @@ func TestAntreaPolicyStatusWithAppliedToPerRule(t *testing.T) { annpBuilder := &AntreaNetworkPolicySpecBuilder{} annpBuilder = annpBuilder.SetName(data.testNamespace, "annp-applied-to-per-rule"). 
SetPriority(1.0) - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server0Name}}}, crdv1beta1.RuleActionAllow, "", "") - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server1Name}}}, crdv1beta1.RuleActionAllow, "", "") annp := annpBuilder.Get() log.Debugf("creating ANNP %v", annp.Name) @@ -4592,15 +4645,15 @@ func TestAntreaPolicyStatusWithAppliedToUnsupportedGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, nil) - testNamespace := namespaces["x"] + testNamespace := getNS("x") // Build a Group with namespaceSelector selecting namespaces outside testNamespace. grpName := "grp-with-ns-selector" grpBuilder := &GroupSpecBuilder{} grpBuilder = grpBuilder.SetName(grpName).SetNamespace(testNamespace). SetPodSelector(map[string]string{"pod": "b"}, nil). - SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) grp, err := k8sUtils.CreateOrUpdateGroup(grpBuilder.Get()) failOnError(err, t) failOnError(waitForResourceReady(t, timeout, grp), t) @@ -4711,7 +4764,28 @@ func (data *TestData) waitForACNPRealized(t *testing.T, name string, timeout tim return nil } -// testANNPNetworkPolicyStatsWithDropAction tests antreanetworkpolicystats can correctly collect dropped packets stats from ANNP if +// TestAntreaPolicyStats is the top-level test which contains all subtests for +// AntreaPolicyStats related test cases so that they can share setup and teardown. 
+func TestAntreaPolicyStats(t *testing.T) { + skipIfHasWindowsNodes(t) + skipIfAntreaPolicyDisabled(t) + skipIfNetworkPolicyStatsDisabled(t) + + data, err := setupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer teardownTest(t, data) + + t.Run("testANNPNetworkPolicyStatsWithDropAction", func(t *testing.T) { + testANNPNetworkPolicyStatsWithDropAction(t, data) + }) + t.Run("testAntreaClusterNetworkPolicyStats", func(t *testing.T) { + testAntreaClusterNetworkPolicyStats(t, data) + }) +} + +// testANNPNetworkPolicyStatsWithDropAction tests antreanetworkpolicystats can correctly collect dropped packets stats from ANNP if networkpolicystats feature is enabled func testANNPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) diff --git a/test/e2e/clustergroup_test.go b/test/e2e/clustergroup_test.go index 17267ba788d..6576546b1ae 100644 --- a/test/e2e/clustergroup_test.go +++ b/test/e2e/clustergroup_test.go @@ -49,7 +49,7 @@ func testInvalidCGIPBlockWithPodSelector(t *testing.T) { func testInvalidCGIPBlockWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with ipblock and namespaceSelector") cgName := "ipb-ns" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} cg := &crdv1beta1.ClusterGroup{ @@ -72,7 +72,7 @@ func testInvalidCGServiceRefWithPodSelector(t *testing.T) { cgName := "svcref-pod-selector" pSel := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "x"}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } cg := &crdv1beta1.ClusterGroup{ @@ -93,9 +93,9 @@ func testInvalidCGServiceRefWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with serviceReference and namespaceSelector") cgName := "svcref-ns-selector" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } cg := &crdv1beta1.ClusterGroup{ @@ -119,7 +119,7 @@ func testInvalidCGServiceRefWithIPBlock(t *testing.T) { cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } cg := &crdv1beta1.ClusterGroup{ @@ -182,7 +182,7 @@ func testInvalidCGChildGroupWithServiceReference(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with childGroups and ServiceReference") cgName := "child-group-svcref" svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } cg := &crdv1beta1.ClusterGroup{ @@ -320,7 +320,7 @@ func TestClusterGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, nil) t.Run("TestGroupClusterGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidCGIPBlockWithPodSelector(t) }) diff --git a/test/e2e/group_test.go b/test/e2e/group_test.go index a35651c7b8f..121c1a46627 100644 --- 
a/test/e2e/group_test.go +++ b/test/e2e/group_test.go @@ -32,7 +32,7 @@ func testInvalidGroupIPBlockWithPodSelector(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["x"], + Namespace: getNS("x"), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -48,13 +48,13 @@ func testInvalidGroupIPBlockWithPodSelector(t *testing.T) { func testInvalidGroupIPBlockWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("group created with ipblock and namespaceSelector") gName := "ipb-ns" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["x"], + Namespace: getNS("x"), }, Spec: crdv1beta1.GroupSpec{ NamespaceSelector: nSel, @@ -72,13 +72,13 @@ func testInvalidGroupServiceRefWithPodSelector(t *testing.T) { gName := "svcref-pod-selector" pSel := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "x"}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -94,15 +94,15 @@ func testInvalidGroupServiceRefWithPodSelector(t *testing.T) { func testInvalidGroupServiceRefWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("group created with serviceReference and namespaceSelector") gName := "svcref-ns-selector" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ NamespaceSelector: nSel, @@ -121,13 +121,13 @@ func testInvalidGroupServiceRefWithIPBlock(t *testing.T) { cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ ServiceReference: svcRef, @@ -149,7 +149,7 @@ func createChildGroupForTest(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: testChildGroupName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ PodSelector: &metav1.LabelSelector{}, @@ -161,7 +161,7 @@ func createChildGroupForTest(t *testing.T) { } func cleanupChildGroupForTest(t *testing.T) { - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], testChildGroupName); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), testChildGroupName); err != nil { failOnError(err, t) } } @@ -173,7 +173,7 @@ func testInvalidGroupChildGroupWithPodSelector(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -191,12 +191,12 @@ 
func testInvalidGroupChildGroupWithServiceReference(t *testing.T) { gName := "child-group-svcref" svcRef := &crdv1beta1.NamespacedName{ Name: "test-svc", - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ ServiceReference: svcRef, @@ -213,13 +213,13 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { invalidErr := fmt.Errorf("group created with childGroup which has childGroups itself") gName1, gName2 := "g-nested-1", "g-nested-2" g1 := &crdv1beta1.Group{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespaces[testChildGroupNamespace], Name: gName1}, + ObjectMeta: metav1.ObjectMeta{Namespace: getNS(testChildGroupNamespace), Name: gName1}, Spec: crdv1beta1.GroupSpec{ ChildGroups: []crdv1beta1.ClusterGroupReference{crdv1beta1.ClusterGroupReference(testChildGroupName)}, }, } g2 := &crdv1beta1.Group{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespaces[testChildGroupNamespace], Name: gName2}, + ObjectMeta: metav1.ObjectMeta{Namespace: getNS(testChildGroupNamespace), Name: gName2}, Spec: crdv1beta1.GroupSpec{ ChildGroups: []crdv1beta1.ClusterGroupReference{crdv1beta1.ClusterGroupReference(gName1)}, }, @@ -235,7 +235,7 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { failOnError(invalidErr, t) } // cleanup g-nested-1 - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], gName1); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), gName1); err != nil { failOnError(err, t) } // Try to create g-nested-2 first and then g-nested-1. @@ -249,7 +249,7 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { failOnError(invalidErr, t) } // cleanup g-nested-2 - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], gName2); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), gName2); err != nil { failOnError(err, t) } } @@ -263,7 +263,7 @@ func TestGroup(t *testing.T) { t.Fatalf("Error when setting up test: %v", err) } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, nil) t.Run("TestGroupNamespacedGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidGroupIPBlockWithPodSelector(t) }) diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index ef0720afc6d..7f1570822a6 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -38,6 +38,8 @@ import ( "antrea.io/antrea/test/e2e/utils" ) +var ErrPodNotFound = errors.New("pod not found") + type KubernetesUtils struct { *TestData podCache map[string][]v1.Pod @@ -60,13 +62,15 @@ type TestCase struct { // TestStep is a single unit of testing spec. It includes the policy specs that need to be // applied for this test, the port to test traffic on and the expected Reachability matrix. 
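The struct below also gains optional CustomSetup/CustomTeardown hooks, which executeTestsWithData (see the hunk above) invokes right after the step's resources are applied and right after its probes finish. As a minimal sketch of how a step might use them — the hook bodies here are hypothetical, not taken from this change:

    step := &TestStep{
        Name:          "Port 80 with per-step setup",
        Reachability:  reachability,
        TestResources: []metav1.Object{builder.Get()},
        Ports:         []int32{80},
        Protocol:      ProtocolTCP,
        // Hypothetical hooks: prepare extra state for this step only,
        // then undo it once the step's probes have run.
        CustomSetup:    func() { /* e.g. relabel a Namespace the policy selects */ },
        CustomTeardown: func() { /* restore the original labels */ },
    }
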
type TestStep struct { - Name string - Reachability *Reachability - TestResources []metav1.Object - Ports []int32 - Protocol utils.AntreaPolicyProtocol - Duration time.Duration - CustomProbes []*CustomProbe + Name string + Reachability *Reachability + TestResources []metav1.Object + Ports []int32 + Protocol utils.AntreaPolicyProtocol + Duration time.Duration + CustomProbes []*CustomProbe + CustomSetup func() + CustomTeardown func() } // CustomProbe will spin up (or update) SourcePod and DestPod such that Add event of Pods @@ -89,7 +93,11 @@ type probeResult struct { err error } -var ErrPodNotFound = errors.New("Pod not found") +// TestNamespaceMeta holds the relevant metadata of a test Namespace during initialization. +type TestNamespaceMeta struct { + Name string + Labels map[string]string +} // GetPodByLabel returns a Pod with the matching Namespace and "pod" label if it's found. // If the pod is not found, GetPodByLabel returns "ErrPodNotFound". @@ -736,9 +744,9 @@ func (data *TestData) DeleteNetworkPolicy(ns, name string) error { } // CleanNetworkPolicies is a convenience function for deleting NetworkPolicies in the provided namespaces. -func (data *TestData) CleanNetworkPolicies(namespaces map[string]string) error { +func (data *TestData) CleanNetworkPolicies(namespaces map[string]TestNamespaceMeta) error { for _, ns := range namespaces { - if err := data.clientset.NetworkingV1().NetworkPolicies(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + if err := data.clientset.NetworkingV1().NetworkPolicies(ns.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { return fmt.Errorf("unable to delete NetworkPolicies in Namespace '%s': %w", ns, err) } } @@ -1101,11 +1109,15 @@ func (k *KubernetesUtils) ValidateRemoteCluster(remoteCluster *KubernetesUtils, } } -func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) { +func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, podsPerNamespace []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) { for key, ns := range namespaces { if createNamespaces { - _, err := k.CreateOrUpdateNamespace(ns, map[string]string{"ns": ns}) - if err != nil { + if ns.Labels == nil { + ns.Labels = make(map[string]string) + } + // convenience label for testing + ns.Labels["ns"] = ns.Name + if _, err := k.CreateOrUpdateNamespace(ns.Name, ns.Labels); err != nil { return nil, fmt.Errorf("unable to create/update ns %s: %w", ns, err) } } @@ -1117,20 +1129,20 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, if hostNetworks != nil { hostNetwork = hostNetworks[key] } - for _, pod := range pods { + for _, pod := range podsPerNamespace { log.Infof("Creating/updating Pod '%s/%s'", ns, pod) - deployment := ns + pod - _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork) + deployment := ns.Name + pod + _, err := k.CreateOrUpdateDeployment(ns.Name, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork) if err != nil { return nil, fmt.Errorf("unable to create/update Deployment '%s/%s': %w", ns, pod, err) } } } var allPods []Pod - podIPs := make(map[string][]string, len(pods)*len(namespaces)) - for _, podName := range pods { + podIPs := 
make(map[string][]string, len(podsPerNamespace)*len(namespaces)) + for _, podName := range podsPerNamespace { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) } } for _, pod := range allPods { @@ -1150,7 +1162,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, return podIPs, nil } -func (k *KubernetesUtils) Cleanup(namespaces map[string]string) { +func (k *KubernetesUtils) Cleanup(namespaces map[string]TestNamespaceMeta) { // Cleanup any cluster-scoped resources. if err := k.CleanACNPs(); err != nil { log.Errorf("Error when cleaning up ACNPs: %v", err) @@ -1161,7 +1173,7 @@ func (k *KubernetesUtils) Cleanup(namespaces map[string]string) { for _, ns := range namespaces { log.Infof("Deleting test Namespace %s", ns) - if err := k.DeleteNamespace(ns, defaultTimeout); err != nil { + if err := k.DeleteNamespace(ns.Name, defaultTimeout); err != nil { log.Errorf("Error when deleting Namespace '%s': %v", ns, err) } } diff --git a/test/e2e/nodenetworkpolicy_test.go b/test/e2e/nodenetworkpolicy_test.go index be9cb945ecd..5564fd37329 100644 --- a/test/e2e/nodenetworkpolicy_test.go +++ b/test/e2e/nodenetworkpolicy_test.go @@ -36,11 +36,14 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo p8081 = 8081 p8082 = 8082 p8085 = 8085 - pods = []string{"a"} + podsPerNamespace = []string{"a"} suffix := randName("") - namespaces = make(map[string]string) - namespaces["x"] = "x-" + suffix - namespaces["y"] = "y-" + suffix + namespaces = make(map[string]TestNamespaceMeta) + for _, ns := range []string{"x", "y", "z"} { + namespaces[ns] = TestNamespaceMeta{ + Name: ns + "-" + suffix, + } + } nodes = make(map[string]string) nodes["x"] = controlPlaneNodeName() nodes["y"] = workerNodeName(1) @@ -50,15 +53,14 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo hostNetworks["y"] = true } else { hostNetworks["y"] = false - namespaces["z"] = "z-" + suffix nodes["z"] = workerNodeName(1) hostNetworks["z"] = false } allPods = []Pod{} - for _, podName := range pods { + for _, podName := range podsPerNamespace { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) } } @@ -66,7 +68,7 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nodes, hostNetworks) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, true, nodes, hostNetworks) failOnError(err, t) podIPs = ips } @@ -139,25 +141,23 @@ func testNodeACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProt SetPriority(1.1). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder1.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow-x-to-y-egress"). SetPriority(1.1). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder2.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 81", - reachability, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{81}, - protocol, - 0, - nil, + Name: "Port 81", + Reachability: reachability, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{81}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -189,19 +189,17 @@ func testNodeACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -225,19 +223,17 @@ func testNodeACNPDropIngress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -253,19 +249,17 @@ func testNodeACNPPortRange(t *testing.T) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) testSteps := []*TestStep{ { - fmt.Sprintf("ACNP Drop Ports 8080:8082"), - reachability, - []metav1.Object{builder.Get()}, - []int32{8080, 8081, 8082}, - ProtocolTCP, - 0, - nil, + Name: "ACNP Drop Ports 8080:8082", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{8080, 8081, 8082}, + Protocol: ProtocolTCP, }, } @@ -280,7 +274,7 @@ // This test retrieves the port range from the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to // verify that packets can be matched by source port. func testNodeACNPSourcePort(t *testing.T) { - portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a") + portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(getNS("x"), "a") failOnError(err, t) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-source-port"). @@ -304,37 +298,31 @@ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) // After adding the dst port constraint of port 80, traffic on port 81 should not be affected. updatedReachability := NewReachability(allPods, Connected) testSteps := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 81", - updatedReachability, - []metav1.Object{builder2.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "Port 81", + Reachability: updatedReachability, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, { - "Port range 80-81", - reachability, - []metav1.Object{builder3.Get()}, - []int32{80, 81}, - ProtocolTCP, - 0, - nil, + Name: "Port range 80-81", + Reachability: reachability, + TestResources: []metav1.Object{builder3.Get()}, + Ports: []int32{80, 81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -367,7 +355,7 @@ func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -376,16 +364,14 @@ func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) { if protocol == ProtocolSCTP { expectedResult = Dropped } - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), expectedResult) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), expectedResult) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -401,19 +387,17 @@ func testNodeACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Rejected) + reachability.Expect(getPod("y", "a"), getPod("x", "a"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -429,31 +413,27 @@ func testNodeACNPNoEffectOnOtherProtocols(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachability1.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) reachability2 := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability1, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80", - reachability2, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolUDP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolUDP, }, } testCase := []*TestCase{ @@ -471,7 +451,7 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Highest priority. 
Drops traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). @@ -479,7 +459,7 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Medium priority. Allows traffic from y to x. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). @@ -487,34 +467,30 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Lowest priority. Drops traffic from y to x. builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityAllACNPs.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies with different priorities", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies with different priorities", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // Create the Policies in specific order to make sure that priority re-assignments work as expected. testStepAll := []*TestStep{ { - "All three Policies", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -534,51 +510,47 @@ func testNodeACNPTierOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Highest priority tier. Drops traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). SetTier("securityops"). SetPriority(10). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from y to x. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). SetTier("application"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from y to x. builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityAllACNPs.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testStepAll := []*TestStep{ { - "All three Policies in different tiers", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies in different tiers", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -606,7 +578,7 @@ func testNodeACNPCustomTiers(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Medium priority tier. Allows traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). @@ -615,32 +587,28 @@ func testNodeACNPCustomTiers(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Lowest priority tier. Drops traffic from y to x. 
builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityOneACNP := NewReachability(allPods, Connected) - reachabilityOneACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityOneACNP.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepOneACNP := []*TestStep{ { - "One Policy", - reachabilityOneACNP, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "One Policy", + Reachability: reachabilityOneACNP, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } reachabilityTwoACNPs := NewReachability(allPods, Connected) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder2.Get(), builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -663,7 +631,7 @@ func testNodeACNPPriorityConflictingRule(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). @@ -672,19 +640,17 @@ func testNodeACNPPriorityConflictingRule(t *testing.T) { // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) - reachabilityBothACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityBothACNP.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -699,19 +665,17 @@ func testNodeACNPNamespaceIsolation(t *testing.T) { SetTier("baseline"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) - builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + reachability1.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability1, - []metav1.Object{builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -723,40 +687,36 @@ func testNodeACNPNamespaceIsolation(t *testing.T) { func testNodeACNPClusterGroupUpdate(t *testing.T) { cgName := "cg-ns-z-then-y" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) // Update CG NS selector to group Pods from Namespace Y updatedCgBuilder := &ClusterGroupSpecBuilder{} - updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80 - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -766,8 +726,8 @@ func testNodeACNPClusterGroupUpdate(t *testing.T) { } func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { - podYAIP, _ := podIPs[namespaces["y"]+"/a"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podYAIP, _ := podIPs[getNS("y")+"/a"] + podZAIP, _ := podIPs[getNS("z")+"/a"] // There are three situations of a Pod's IP(s): // 1. Only one IPv4 address. // 2. Only one IPv6 address. @@ -799,22 +759,20 @@ func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("z", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -826,7 +784,7 @@ func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { cg1Name := "cg-1" cgBuilder1 := &ClusterGroupSpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) cgNestedName := "cg-nested" cgBuilderNested := &ClusterGroupSpecBuilder{} cgBuilderNested = cgBuilderNested.SetName(cgNestedName).SetChildGroups([]string{cg1Name}) @@ -835,35 +793,31 @@ func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}). 
AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) + nil, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } cg2Name := "cg-2" cgBuilder2 := &ClusterGroupSpecBuilder{} - cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg2Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2} @@ -874,8 +828,8 @@ func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) } func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { - podYAIP, _ := podIPs[namespaces["y"]+"/a"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podYAIP, _ := podIPs[getPodName("y", "a")] + podZAIP, _ := podIPs[getPodName("z", "a")] genCIDR := func(ip string) string { switch IPFamily(ip) { case "v4": @@ -905,33 +859,29 @@ func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { SetPriority(1.0). 
@@ -905,33 +859,29 @@ func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
 		SetPriority(1.0).
 		SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
 	builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
-		nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
+		nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
 
 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
-	reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped)
+	reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped)
+	reachability.Expect(getPod("x", "a"), getPod("z", "a"), Dropped)
 	testStep := &TestStep{
-		"Port 80",
-		reachability,
-		[]metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()},
-		[]int32{80},
-		ProtocolTCP,
-		0,
-		nil,
+		Name:          "Port 80",
+		Reachability:  reachability,
+		TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()},
+		Ports:         []int32{80},
+		Protocol:      ProtocolTCP,
 	}
 
 	cgParent = cgParent.SetChildGroups([]string{cg1Name})
 	reachability2 := NewReachability(allPods, Connected)
-	reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+	reachability2.Expect(getPod("x", "a"), getPod("y", "a"), Dropped)
 	testStep2 := &TestStep{
-		"Port 80, updated",
-		reachability2,
-		[]metav1.Object{cgParent.Get()},
-		[]int32{80},
-		ProtocolTCP,
-		0,
-		nil,
+		Name:          "Port 80, updated",
+		Reachability:  reachability2,
+		TestResources: []metav1.Object{cgParent.Get()},
+		Ports:         []int32{80},
+		Protocol:      ProtocolTCP,
 	}
 	testCase := []*TestCase{
diff --git a/test/e2e/reachability.go b/test/e2e/reachability.go
index 0bf04418db5..48166588a4c 100644
--- a/test/e2e/reachability.go
+++ b/test/e2e/reachability.go
@@ -325,6 +325,26 @@ func (r *Reachability) ExpectEgressToNamespace(pod Pod, namespace string, connec
 	}
 }
 
+func (r *Reachability) ExpectNamespaceIngressFromNamespace(dstNamespace, srcNamespace string, connectivity PodConnectivityMark) {
+	dstPods, ok := r.PodsByNamespace[dstNamespace]
+	if !ok {
+		panic(fmt.Errorf("destination Namespace %s is not found", dstNamespace))
+	}
+	for _, p := range dstPods {
+		r.ExpectIngressFromNamespace(p, srcNamespace, connectivity)
+	}
+}
+
+func (r *Reachability) ExpectNamespaceEgressToNamespace(srcNamespace, dstNamespace string, connectivity PodConnectivityMark) {
+	srcPods, ok := r.PodsByNamespace[srcNamespace]
+	if !ok {
+		panic(fmt.Errorf("src Namespace %s is not found", srcNamespace))
+	}
+	for _, p := range srcPods {
+		r.ExpectEgressToNamespace(p, dstNamespace, connectivity)
+	}
+}
+
 func (r *Reachability) Observe(pod1 Pod, pod2 Pod, connectivity PodConnectivityMark) {
 	r.Observed.Set(string(pod1), string(pod2), connectivity)
 }
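[Note: the two helpers added above fan the existing per-Pod expectations out across every Pod in a Namespace, which the new Namespace-to-Namespace policy tests need. A usage sketch, with hypothetical namespace keys:]

    // Expect every Pod in Namespace y to be unable to reach any Pod in
    // Namespace z (egress), and every Pod in y to reject ingress from z.
    reachability := NewReachability(allPods, Connected)
    reachability.ExpectNamespaceEgressToNamespace(getNS("y"), getNS("z"), Dropped)
    reachability.ExpectNamespaceIngressFromNamespace(getNS("y"), getNS("z"), Dropped)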
diff --git a/test/e2e/utils/cnp_spec_builder.go b/test/e2e/utils/cnp_spec_builder.go
index 0c6acdc7585..708826754aa 100644
--- a/test/e2e/utils/cnp_spec_builder.go
+++ b/test/e2e/utils/cnp_spec_builder.go
@@ -130,15 +130,13 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin
 func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType,
 	icmpCode, igmpType *int32, groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string,
-	podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+	podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, namespaces *crdv1beta1.PeerNamespaces,
 	ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder {
 	var podSel *metav1.LabelSelector
 	var nodeSel *metav1.LabelSelector
 	var nsSel *metav1.LabelSelector
-	var ns *crdv1beta1.PeerNamespaces
 	var appliedTos []crdv1beta1.AppliedTo
-	matchSelf := crdv1beta1.NamespaceMatchSelf
 
 	if b.Spec.Ingress == nil {
 		b.Spec.Ingress = []crdv1beta1.Rule{}
@@ -162,11 +160,6 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol
 			MatchExpressions: nsSelectorMatchExp,
 		}
 	}
-	if selfNS == true {
-		ns = &crdv1beta1.PeerNamespaces{
-			Match: matchSelf,
-		}
-	}
 	var ipBlock *crdv1beta1.IPBlock
 	if cidr != nil {
 		ipBlock = &crdv1beta1.IPBlock{
@@ -185,12 +178,12 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol
 	}
 	// An empty From/To in ACNP rules evaluates to match all addresses.
 	policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0)
-	if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
+	if podSel != nil || nodeSel != nil || nsSel != nil || namespaces != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
 		policyPeer = []crdv1beta1.NetworkPolicyPeer{{
 			PodSelector:       podSel,
 			NodeSelector:      nodeSel,
 			NamespaceSelector: nsSel,
-			Namespaces:        ns,
+			Namespaces:        namespaces,
 			IPBlock:           ipBlock,
 			Group:             ruleClusterGroup,
 			ServiceAccount:    serviceAccount,
@@ -297,14 +290,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli
 func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType,
 	icmpCode, igmpType *int32, groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string,
-	podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+	podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, namespaces *crdv1beta1.PeerNamespaces,
 	ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder {
 
 	// For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical
 	// With the exception of calling the rule `To` vs. `From`.
 	c := &ClusterNetworkPolicySpecBuilder{}
 	c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nodeSelector, nsSelector,
-		podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount)
+		podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, namespaces, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount)
 
 	theRule := c.Get().Spec.Ingress[0]
 
 	b.Spec.Egress = append(b.Spec.Egress, crdv1beta1.Rule{
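[Note: with the selfNS flag gone, callers now pass a *crdv1beta1.PeerNamespaces directly, which is what lets tests exercise the new sameLabels CRD field as well as the existing Self match. A hypothetical call-site sketch; the SameLabels Go field name is assumed to mirror the sameLabels key in the CRD schema, and "tier" is an invented label key:]

    // Equivalent of the old selfNS=true: peer is the appliedTo's own Namespace.
    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
    	&crdv1beta1.PeerNamespaces{Match: crdv1beta1.NamespaceMatchSelf},
    	nil, crdv1beta1.RuleActionDrop, "", "", nil)

    // New capability: peer Namespaces that share the same values for the given
    // label keys as the appliedTo Namespace.
    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
    	&crdv1beta1.PeerNamespaces{SameLabels: []string{"tier"}},
    	nil, crdv1beta1.RuleActionDrop, "", "", nil)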