diff --git a/build/charts/antrea/crds/ippool.yaml b/build/charts/antrea/crds/ippool.yaml index ff1f3a4af0f..69d3a315496 100644 --- a/build/charts/antrea/crds/ippool.yaml +++ b/build/charts/antrea/crds/ippool.yaml @@ -4,12 +4,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -122,6 +125,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools diff --git a/build/charts/antrea/templates/webhooks/validating/crdvalidator.yaml b/build/charts/antrea/templates/webhooks/validating/crdvalidator.yaml index f01e702eefb..7873a036bb9 100644 --- a/build/charts/antrea/templates/webhooks/validating/crdvalidator.yaml +++ b/build/charts/antrea/templates/webhooks/validating/crdvalidator.yaml @@ -148,7 +148,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 59157fcb6d5..1856ea30c8e 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -1771,12 +1771,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 
IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1889,6 +1892,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools @@ -5378,7 +5504,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/build/yamls/antrea-crds.yml b/build/yamls/antrea-crds.yml index c57d4446e6b..d047fa88444 100644 --- a/build/yamls/antrea-crds.yml +++ b/build/yamls/antrea-crds.yml @@ -1752,12 +1752,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1870,6 +1873,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 
+ prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index a1d650c23c4..d59e30e5f71 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -1771,12 +1771,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1889,6 +1892,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", 
"v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools @@ -5379,7 +5505,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index a327292d4d6..33a0f87d118 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -1771,12 +1771,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1889,6 +1892,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools @@ -5376,7 +5502,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index 91c70a3868c..702ec6728e4 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -1771,12 +1771,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + 
deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1889,6 +1892,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools @@ -5435,7 +5561,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index f166e30f9f1..76fb1bf1cce 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -1771,12 +1771,15 @@ metadata: name: ippools.crd.antrea.io labels: app: antrea + served-by: antrea-controller spec: group: crd.antrea.io versions: - name: v1alpha2 served: true - storage: true + storage: false + deprecated: true + deprecationWarning: "crd.antrea.io/v1alpha2 IPPool is deprecated; use crd.antrea.io/v1beta1 IPPool" schema: openAPIV3Schema: type: object @@ -1889,6 +1892,129 @@ spec: type: date subresources: status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + required: + - ipRanges + - subnetInfo + type: object + properties: + ipRanges: + items: + oneOf: + - required: + - cidr + - required: + - start + - end + properties: + cidr: + format: cidr + type: string + start: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + end: + oneOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + type: array + subnetInfo: + type: object + required: + - gateway + - prefixLength + properties: + gateway: + type: string + oneOf: + - 
format: ipv4 + - format: ipv6 + prefixLength: + type: integer + minimum: 1 + maximum: 127 + vlan: + type: integer + minimum: 0 + maximum: 4094 + status: + properties: + ipAddresses: + items: + properties: + ipAddress: + type: string + owner: + properties: + pod: + properties: + name: + type: string + namespace: + type: string + containerID: + type: string + ifName: + type: string + type: object + statefulSet: + properties: + name: + type: string + namespace: + type: string + index: + type: integer + type: object + type: object + phase: + type: string + type: object + type: array + usage: + properties: + used: + type: integer + total: + type: integer + type: object + type: object + additionalPrinterColumns: + - description: The number of total IPs + jsonPath: .status.usage.total + name: Total + type: integer + - description: The number of allocated IPs + jsonPath: .status.usage.used + name: Used + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + subresources: + status: {} + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: "antrea" + namespace: "kube-system" + path: "/convert/ippool" scope: Cluster names: plural: ippools @@ -5376,7 +5502,7 @@ webhooks: rules: - operations: ["CREATE", "UPDATE", "DELETE"] apiGroups: ["crd.antrea.io"] - apiVersions: ["v1alpha2"] + apiVersions: ["v1beta1"] resources: ["ippools"] scope: "Cluster" admissionReviewVersions: ["v1", "v1beta1"] diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index 67182bfddc0..697af3ff64a 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -117,7 +117,7 @@ func run(o *Options) error { egressInformer := crdInformerFactory.Crd().V1beta1().Egresses() externalIPPoolInformer := crdInformerFactory.Crd().V1beta1().ExternalIPPools() trafficControlInformer := crdInformerFactory.Crd().V1alpha2().TrafficControls() - ipPoolInformer := crdInformerFactory.Crd().V1alpha2().IPPools() + ipPoolInformer := crdInformerFactory.Crd().V1beta1().IPPools() nodeInformer := informerFactory.Core().V1().Nodes() serviceInformer := informerFactory.Core().V1().Services() endpointsInformer := informerFactory.Core().V1().Endpoints() diff --git a/cmd/antrea-controller/controller.go b/cmd/antrea-controller/controller.go index 00d51e1f193..c9ef8917eb3 100644 --- a/cmd/antrea-controller/controller.go +++ b/cmd/antrea-controller/controller.go @@ -117,6 +117,7 @@ var allowedPaths = []string{ "/validate/supportbundlecollection", "/validate/traceflow", "/convert/clustergroup", + "/convert/ippool", } // run starts Antrea Controller with the given options and waits for termination signal. 
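
The v1beta1 IPPool drops the per-range subnet parameters (and the explicit `ipVersion` field) of v1alpha2 in favor of a single pool-level `subnetInfo`, and the CRD above registers a conversion webhook served by the `antrea` Service in `kube-system` at `/convert/ippool`, which the controller now whitelists in `allowedPaths`. The conversion handler itself is not part of this diff; the sketch below only illustrates the spec reshaping it implies, using simplified stand-in structs (not the real `antrea.io/antrea/pkg/apis/crd` Go types) and a hypothetical `convertSpec` helper.

```go
// ippool_convert_sketch.go — a minimal, self-contained sketch of the shape
// change behind the new "/convert/ippool" path. The structs are simplified
// stand-ins for the CRD schemas in this diff, and convertSpec is a
// hypothetical helper, not Antrea's actual conversion code.
package main

import "fmt"

// v1alpha2: every IP range carries its own subnet parameters.
type subnetIPRangeV1alpha2 struct {
	CIDR, Start, End string
	Gateway          string
	PrefixLength     int32
	VLAN             int32
}

// v1beta1: ranges are plain, and the pool has a single subnetInfo.
type ipRangeV1beta1 struct{ CIDR, Start, End string }

type subnetInfoV1beta1 struct {
	Gateway      string
	PrefixLength int32
	VLAN         int32
}

type specV1beta1 struct {
	IPRanges   []ipRangeV1beta1
	SubnetInfo subnetInfoV1beta1
}

// convertSpec lifts the per-range subnet parameters of a v1alpha2 spec to the
// pool level. It refuses pools whose ranges disagree, since a single v1beta1
// subnetInfo cannot represent them.
func convertSpec(in []subnetIPRangeV1alpha2) (*specV1beta1, error) {
	if len(in) == 0 {
		return nil, fmt.Errorf("no ipRanges to convert")
	}
	out := &specV1beta1{
		SubnetInfo: subnetInfoV1beta1{
			Gateway:      in[0].Gateway,
			PrefixLength: in[0].PrefixLength,
			VLAN:         in[0].VLAN,
		},
	}
	for _, r := range in {
		if r.Gateway != out.SubnetInfo.Gateway ||
			r.PrefixLength != out.SubnetInfo.PrefixLength ||
			r.VLAN != out.SubnetInfo.VLAN {
			return nil, fmt.Errorf("ranges use different subnets; cannot convert to a single subnetInfo")
		}
		out.IPRanges = append(out.IPRanges, ipRangeV1beta1{CIDR: r.CIDR, Start: r.Start, End: r.End})
	}
	return out, nil
}

func main() {
	spec, err := convertSpec([]subnetIPRangeV1alpha2{
		{CIDR: "10.2.0.0/24", Gateway: "10.2.0.1", PrefixLength: 24, VLAN: 2},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", spec)
}
```

A pool whose v1alpha2 ranges carry different gateways, prefix lengths, or VLANs has no lossless v1beta1 representation, which is why the sketch rejects that case.
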
@@ -149,7 +150,7 @@ func run(o *Options) error { egressInformer := crdInformerFactory.Crd().V1beta1().Egresses() externalIPPoolInformer := crdInformerFactory.Crd().V1beta1().ExternalIPPools() externalNodeInformer := crdInformerFactory.Crd().V1alpha1().ExternalNodes() - ipPoolInformer := crdInformerFactory.Crd().V1alpha2().IPPools() + ipPoolInformer := crdInformerFactory.Crd().V1beta1().IPPools() adminNPInformer := policyInformerFactory.Policy().V1alpha1().AdminNetworkPolicies() banpInformer := policyInformerFactory.Policy().V1alpha1().BaselineAdminNetworkPolicies() diff --git a/docs/antrea-ipam.md b/docs/antrea-ipam.md index 503e00a4eee..3e4a412443a 100644 --- a/docs/antrea-ipam.md +++ b/docs/antrea-ipam.md @@ -160,18 +160,18 @@ enableBridgingMode=true,featureGates.AntreaIPAM=true,trafficEncapMode=noEncap,no The following example YAML manifest creates an IPPool CR. ```yaml -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: pool1 spec: - ipVersion: 4 ipRanges: - start: "10.2.0.12" end: "10.2.0.20" + subnetInfo: gateway: "10.2.0.1" prefixLength: 24 - vlan: 2 # Default is 0 (untagged). Valid value is 0~4095. + vlan: 2 # Default is 0 (untagged). Valid value is 0~4094. ``` #### IPPool Annotations on Namespace @@ -452,30 +452,30 @@ start and end IP address, both of these IPs will be allocatable (except if one of them corresponds to the gateway). ```yaml -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: ipv4-pool-1 spec: - ipVersion: 4 ipRanges: # 61 different IPs can be allocated from this pool: 64 (2^6) - 3 (network IP, broadcast IP, gateway IP). - cidr: "10.10.1.0/26" + subnetInfo: gateway: "10.10.1.1" prefixLength: 26 ``` ```yaml -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: ipv6-pool-1 spec: - ipVersion: 6 ipRanges: # 257 different IPs can be allocated from this pool: 0x200 - 0x100 + 1. - start: "3ffe:ffff:1:01ff::0100" end: "3ffe:ffff:1:01ff::0200" + subnetInfo: gateway: "3ffe:ffff:1:01ff::1" prefixLength: 64 ``` @@ -484,14 +484,14 @@ When used for Antrea secondary VLAN network, the VLAN set in an `IPPool` IP range will be passed to the VLAN interface configuration. For example: ```yaml -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: ipv4-pool-1 spec: - ipVersion: 4 ipRanges: - cidr: "10.10.1.0/26" + subnetInfo: gateway: "10.10.1.1" prefixLength: 24 vlan: 100 diff --git a/docs/api.md b/docs/api.md index fc71aab4723..972dbfb7e63 100644 --- a/docs/api.md +++ b/docs/api.md @@ -34,7 +34,8 @@ These are the CRDs currently available in `crd.antrea.io`. 
| `ExternalEntity` | v1alpha2 | v1.0.0 | N/A | N/A | | `ExternalIPPool` | v1beta1 | v1.13.0 | N/A | N/A | | `ExternalNode` | v1alpha1 | v1.8.0 | N/A | N/A | -| `IPPool`| v1alpha2 | v1.4.0 | N/A | N/A | +| `IPPool`| v1alpha2 | v1.4.0 | v2.0.0 | N/A | +| `IPPool`| v1beta1 | v2.0.0 | N/A | N/A | | `Group` | v1beta1 | v1.13.0 | N/A | N/A | | `NetworkPolicy` | v1beta1 | v1.13.0 | N/A | N/A | | `SupportBundleCollection` | v1alpha1 | v1.10.0 | N/A | N/A | diff --git a/docs/cookbooks/multus/README.md b/docs/cookbooks/multus/README.md index 917dbc772e7..0812310051c 100644 --- a/docs/cookbooks/multus/README.md +++ b/docs/cookbooks/multus/README.md @@ -118,15 +118,15 @@ secondary network, please refer to the [Antrea IPAM documentation](../../antrea- ```bash cat <= 0 && bytes.Compare(ip16, rangeEnd.To16()) <= 0 } -func ipVersion(ip net.IP) crdv1alpha2.IPVersion { - if ip.To4() != nil { - return crdv1alpha2.IPv4 - } - return crdv1alpha2.IPv6 -} - -func validateIPRange(r crdv1alpha2.SubnetIPRange, poolIPVersion crdv1alpha2.IPVersion) (bool, string) { - if poolIPVersion == crdv1alpha2.IPv4 { - if r.PrefixLength <= 0 || r.PrefixLength >= 32 { - return false, fmt.Sprintf("Invalid prefix length %d", r.PrefixLength) +func validateIPRange(r crdv1beta1.IPRange, subnetInfo crdv1beta1.SubnetInfo) (bool, string) { + // Verify that prefix length matches IP version + gatewayIPVersion := utilnet.IPFamilyOfString(subnetInfo.Gateway) + if gatewayIPVersion == utilnet.IPv4 { + if subnetInfo.PrefixLength <= 0 || subnetInfo.PrefixLength >= 32 { + return false, fmt.Sprintf("Invalid prefix length %d", subnetInfo.PrefixLength) } - } else if poolIPVersion == crdv1alpha2.IPv6 { - if r.PrefixLength <= 0 || r.PrefixLength >= 128 { - return false, fmt.Sprintf("Invalid prefix length %d", r.PrefixLength) + } else if gatewayIPVersion == utilnet.IPv6 { + if subnetInfo.PrefixLength <= 0 || subnetInfo.PrefixLength >= 128 { + return false, fmt.Sprintf("Invalid prefix length %d", subnetInfo.PrefixLength) } } else { - return false, fmt.Sprintf("Invalid IP version %d", int(poolIPVersion)) + return false, fmt.Sprintf("Invalid IP version for gateway %s", subnetInfo.Gateway) } - // Validate the integrity the IP range: + + // Validate the integrity the IP range: // Verify that all the IP ranges have the same IP family as the IP pool // Verify that the gateway IP is reachable from the IP range - gateway := net.ParseIP(r.Gateway) var mask net.IPMask - if ipVersion(gateway) == crdv1alpha2.IPv4 { - mask = net.CIDRMask(int(r.PrefixLength), 32) + if gatewayIPVersion == utilnet.IPv4 { + mask = net.CIDRMask(int(subnetInfo.PrefixLength), 32) } else { - mask = net.CIDRMask(int(r.PrefixLength), 128) + mask = net.CIDRMask(int(subnetInfo.PrefixLength), 128) } - netCIDR := net.IPNet{IP: gateway, Mask: mask} + netCIDR := net.IPNet{IP: net.ParseIP(subnetInfo.Gateway), Mask: mask} if r.CIDR != "" { _, cidr, _ := net.ParseCIDR(r.CIDR) - if ipVersion(cidr.IP) != poolIPVersion { + if utilnet.IPFamilyOf(cidr.IP) != gatewayIPVersion { return false, fmt.Sprintf( - "Range is invalid. IP version of range %s differs from Pool IP version", r.CIDR) + "Range is invalid. IP version of range %s differs from gateway IP version", r.CIDR) } if !netCIDR.Contains(cidr.IP) { return false, fmt.Sprintf( "Range is invalid. 
CIDR %s is not contained within subnet %s/%d", - r.CIDR, netCIDR.IP.String(), r.PrefixLength) + r.CIDR, netCIDR.IP.String(), subnetInfo.PrefixLength) } } else { rStart := net.ParseIP(r.Start) rEnd := net.ParseIP(r.End) - if ipVersion(rStart) != poolIPVersion || ipVersion(rEnd) != poolIPVersion { + if utilnet.IPFamilyOf(rStart) != gatewayIPVersion || utilnet.IPFamilyOf(rEnd) != gatewayIPVersion { return false, fmt.Sprintf( - "Range is invalid. IP version of range %s-%s differs from Pool IP version", r.Start, r.End) + "Range is invalid. IP version of range %s-%s differs from gateway IP version", r.Start, r.End) } if !netCIDR.Contains(rStart) || !netCIDR.Contains(rEnd) { return false, fmt.Sprintf( "Range is invalid. range %s-%s is not contained within subnet %s/%d", - r.Start, r.End, netCIDR.IP.String(), r.PrefixLength) + r.Start, r.End, netCIDR.IP.String(), subnetInfo.PrefixLength) } } + return true, "" } diff --git a/pkg/controller/ipam/validate_test.go b/pkg/controller/ipam/validate_test.go index 67607821854..4d259f4c3fb 100644 --- a/pkg/controller/ipam/validate_test.go +++ b/pkg/controller/ipam/validate_test.go @@ -23,47 +23,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" ) -var testIPPool = &crdv1alpha2.IPPool{ +var testIPPool = &crdv1beta1.IPPool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ip-pool", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: crdv1alpha2.IPv4, - IPRanges: []crdv1alpha2.SubnetIPRange{ + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ { - IPRange: crdv1alpha2.IPRange{ - CIDR: "192.168.0.0/24", - }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.0.1", - PrefixLength: 24, - }, - }, - { - IPRange: crdv1alpha2.IPRange{ - CIDR: "192.168.1.0/24", - }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.1.1", - PrefixLength: 24, - }, - }, - { - IPRange: crdv1alpha2.IPRange{ - Start: "192.168.3.10", - End: "192.168.3.20", - }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.3.1", - PrefixLength: 24, - }, + CIDR: "192.168.0.0/26", }, }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "192.168.0.1", + PrefixLength: 24, + }, }, - Status: crdv1alpha2.IPPoolStatus{}, + Status: crdv1beta1.IPPoolStatus{}, } func marshal(object runtime.Object) []byte { @@ -71,7 +49,7 @@ func marshal(object runtime.Object) []byte { return raw } -func copyAndMutateIPPool(in *crdv1alpha2.IPPool, mutateFunc func(*crdv1alpha2.IPPool)) *crdv1alpha2.IPPool { +func copyAndMutateIPPool(in *crdv1beta1.IPPool, mutateFunc func(*crdv1beta1.IPPool)) *crdv1beta1.IPPool { out := in.DeepCopy() mutateFunc(out) return out @@ -97,16 +75,16 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "CREATE", - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{ + { CIDR: "192.168.3.0/26", }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.3.1", - PrefixLength: 32, - }, - }) + } + pool.Spec.SubnetInfo = crdv1beta1.SubnetInfo{ + Gateway: "192.168.3.1", + PrefixLength: 32, + } }))}, }, expectedResponse: &admv1.AdmissionResponse{ @@ -121,22 
+99,26 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "CREATE", - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{ + { CIDR: "192.168.3.0/26", }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.3.1", - PrefixLength: 24, + { + Start: "192.168.3.10", + End: "192.168.3.20", }, - }) + } + pool.Spec.SubnetInfo = crdv1beta1.SubnetInfo{ + Gateway: "192.168.3.1", + PrefixLength: 24, + } }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "IPRanges [192.168.3.10-192.168.3.20,192.168.3.0/26] overlap", + Message: "IPRanges [192.168.3.0/26,192.168.3.10-192.168.3.20] overlap", }, }, }, @@ -145,22 +127,26 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "CREATE", - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{ + { CIDR: "192.168.3.12/30", }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.3.13", - PrefixLength: 24, + { + Start: "192.168.3.10", + End: "192.168.3.20", }, - }) + } + pool.Spec.SubnetInfo = crdv1beta1.SubnetInfo{ + Gateway: "192.168.3.1", + PrefixLength: 24, + } }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "IPRanges [192.168.3.10-192.168.3.20,192.168.3.12/30] overlap", + Message: "IPRanges [192.168.3.12/30,192.168.3.10-192.168.3.20] overlap", }, }, }, @@ -169,22 +155,22 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "CREATE", - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{ + { CIDR: "10:2400::0/96", }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "10:2400::01", - PrefixLength: 24, - }, - }) + } + pool.Spec.SubnetInfo = crdv1beta1.SubnetInfo{ + Gateway: "192.168.3.1", + PrefixLength: 24, + } }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "Range is invalid. IP version of range 10:2400::0/96 differs from Pool IP version", + Message: "Range is invalid. 
IP version of range 10:2400::0/96 differs from gateway IP version", }, }, }, @@ -193,16 +179,16 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "CREATE", - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{ + { CIDR: "192.168.10.0/26", }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.1.1", - PrefixLength: 24, - }, - }) + } + pool.Spec.SubnetInfo = crdv1beta1.SubnetInfo{ + Gateway: "192.168.1.1", + PrefixLength: 24, + } }))}, }, expectedResponse: &admv1.AdmissionResponse{ @@ -218,14 +204,14 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { Name: "foo", Operation: "UPDATE", OldObject: runtime.RawExtension{Raw: marshal(testIPPool)}, - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = pool.Spec.IPRanges[:1] + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = []crdv1beta1.IPRange{} }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "existing IPRanges [192.168.1.0/24,192.168.3.10-192.168.3.20] cannot be updated or deleted", + Message: "existing IPRanges [192.168.0.0/26] cannot be updated or deleted", }, }, }, @@ -235,14 +221,14 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { Name: "foo", Operation: "UPDATE", OldObject: runtime.RawExtension{Raw: marshal(testIPPool)}, - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges[0].Gateway = "192.168.0.2" + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges[0].CIDR = "192.168.1.0/24" }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "existing IPRanges [192.168.0.0/24] cannot be updated or deleted", + Message: "existing IPRanges [192.168.0.0/26] cannot be updated or deleted", }, }, }, @@ -252,15 +238,10 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { Name: "foo", Operation: "UPDATE", OldObject: runtime.RawExtension{Raw: marshal(testIPPool)}, - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: crdv1alpha2.IPRange{ - CIDR: "192.168.100.0/24", - }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.100.1", - PrefixLength: 24, - }, + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1beta1.IPRange{ + Start: "192.168.0.128", + End: "192.168.0.132", }) }))}, }, @@ -272,23 +253,17 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { Name: "foo", Operation: "UPDATE", OldObject: runtime.RawExtension{Raw: marshal(testIPPool)}, - Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1alpha2.SubnetIPRange{ - IPRange: 
crdv1alpha2.IPRange{ - Start: "192.168.3.10", - End: "192.168.3.30", - }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.3.1", - PrefixLength: 24, - }, + Object: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Spec.IPRanges = append(pool.Spec.IPRanges, crdv1beta1.IPRange{ + Start: "192.168.0.5", + End: "192.168.0.10", }) }))}, }, expectedResponse: &admv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Message: "IPRanges [192.168.3.10-192.168.3.30,192.168.3.10-192.168.3.20] overlap", + Message: "IPRanges [192.168.0.5-192.168.0.10,192.168.0.0/26] overlap", }, }, }, @@ -297,8 +272,8 @@ func TestEgressControllerValidateExternalIPPool(t *testing.T) { request: &admv1.AdmissionRequest{ Name: "foo", Operation: "DELETE", - OldObject: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1alpha2.IPPool) { - pool.Status.IPAddresses = []crdv1alpha2.IPAddressState{ + OldObject: runtime.RawExtension{Raw: marshal(copyAndMutateIPPool(testIPPool, func(pool *crdv1beta1.IPPool) { + pool.Status.IPAddresses = []crdv1beta1.IPAddressState{ { IPAddress: "192.168.0.10", }, diff --git a/pkg/ipam/poolallocator/allocator.go b/pkg/ipam/poolallocator/allocator.go index 3319722c21c..036a3778802 100644 --- a/pkg/ipam/poolallocator/allocator.go +++ b/pkg/ipam/poolallocator/allocator.go @@ -20,15 +20,16 @@ import ( "net" "reflect" - "antrea.io/antrea/pkg/apis/crd/v1alpha2" + "antrea.io/antrea/pkg/apis/crd/v1beta1" crdclientset "antrea.io/antrea/pkg/client/clientset/versioned" - informers "antrea.io/antrea/pkg/client/listers/crd/v1alpha2" + informers "antrea.io/antrea/pkg/client/listers/crd/v1beta1" "antrea.io/antrea/pkg/ipam/ipallocator" iputil "antrea.io/antrea/pkg/util/ip" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" ) // IPPoolAllocator is responsible for allocating IPs from IP set defined in IPPool CRD. @@ -37,7 +38,7 @@ import ( // only be extended. 
type IPPoolAllocator struct { // IP version of the IPPool - IPVersion v1alpha2.IPVersion + IPVersion utilnet.IPFamily // Name of IPPool custom resource ipPoolName string @@ -59,7 +60,7 @@ func NewIPPoolAllocator(poolName string, client crdclientset.Interface, poolList } allocator := &IPPoolAllocator{ - IPVersion: pool.Spec.IPVersion, + IPVersion: utilnet.IPFamilyOfString(pool.Spec.SubnetInfo.Gateway), ipPoolName: poolName, crdClient: client, ipPoolLister: poolLister, @@ -68,14 +69,14 @@ func NewIPPoolAllocator(poolName string, client crdclientset.Interface, poolList return allocator, nil } -func (a *IPPoolAllocator) getPool() (*v1alpha2.IPPool, error) { +func (a *IPPoolAllocator) getPool() (*v1beta1.IPPool, error) { pool, err := a.ipPoolLister.Get(a.ipPoolName) return pool, err } // initAllocatorList reads IP Pool status and initializes a list of allocators based on // IP Pool spec and state of allocation recorded in the status -func (a *IPPoolAllocator) initIPAllocators(ipPool *v1alpha2.IPPool) (ipallocator.MultiIPAllocator, error) { +func (a *IPPoolAllocator) initIPAllocators(ipPool *v1beta1.IPPool) (ipallocator.MultiIPAllocator, error) { var allocators ipallocator.MultiIPAllocator @@ -83,14 +84,14 @@ func (a *IPPoolAllocator) initIPAllocators(ipPool *v1alpha2.IPPool) (ipallocator for _, ipRange := range ipPool.Spec.IPRanges { if len(ipRange.CIDR) > 0 { // Reserve gateway address and broadcast address - reservedIPs := []net.IP{net.ParseIP(ipRange.SubnetInfo.Gateway)} + reservedIPs := []net.IP{net.ParseIP(ipPool.Spec.SubnetInfo.Gateway)} _, ipNet, err := net.ParseCIDR(ipRange.CIDR) if err != nil { return nil, err } size, bits := ipNet.Mask.Size() - if int32(size) == ipRange.SubnetInfo.PrefixLength && bits == 32 { + if int32(size) == ipPool.Spec.SubnetInfo.PrefixLength && bits == 32 { // Allocation CIDR covers entire subnet, thus we need // to reserve broadcast IP as well for IPv4 reservedIPs = append(reservedIPs, iputil.GetLocalBroadcastIP(ipNet)) @@ -122,7 +123,7 @@ func (a *IPPoolAllocator) initIPAllocators(ipPool *v1alpha2.IPPool) (ipallocator return allocators, nil } -func (a *IPPoolAllocator) getPoolAndInitIPAllocators() (*v1alpha2.IPPool, ipallocator.MultiIPAllocator, error) { +func (a *IPPoolAllocator) getPoolAndInitIPAllocators() (*v1beta1.IPPool, ipallocator.MultiIPAllocator, error) { ipPool, err := a.getPool() if err != nil { @@ -136,9 +137,9 @@ func (a *IPPoolAllocator) getPoolAndInitIPAllocators() (*v1alpha2.IPPool, ipallo return ipPool, allocators, nil } -func (a *IPPoolAllocator) appendPoolUsage(ipPool *v1alpha2.IPPool, ip net.IP, state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) error { +func (a *IPPoolAllocator) appendPoolUsage(ipPool *v1beta1.IPPool, ip net.IP, state v1beta1.IPAddressPhase, owner v1beta1.IPAddressOwner) error { newPool := ipPool.DeepCopy() - usageEntry := v1alpha2.IPAddressState{ + usageEntry := v1beta1.IPAddressState{ IPAddress: ip.String(), Phase: state, Owner: owner, @@ -146,7 +147,7 @@ func (a *IPPoolAllocator) appendPoolUsage(ipPool *v1alpha2.IPPool, ip net.IP, st newPool.Status.IPAddresses = append(newPool.Status.IPAddresses, usageEntry) a.updateUsage(newPool) - _, err := a.crdClient.CrdV1alpha2().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + _, err := a.crdClient.CrdV1beta1().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) if err != nil { klog.Warningf("IP Pool %s update with status %+v failed: %+v", newPool.Name, newPool.Status, err) return err @@ -157,7 +158,7 @@ func (a 
*IPPoolAllocator) appendPoolUsage(ipPool *v1alpha2.IPPool, ip net.IP, st } // updateIPAddressState updates the status of the specified IP in the provided IPPool. It requires the IP is already in the IPAddresses list of the IPPool's status. -func (a *IPPoolAllocator) updateIPAddressState(ipPool *v1alpha2.IPPool, ip net.IP, state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) error { +func (a *IPPoolAllocator) updateIPAddressState(ipPool *v1beta1.IPPool, ip net.IP, state v1beta1.IPAddressPhase, owner v1beta1.IPAddressOwner) error { newPool := ipPool.DeepCopy() ipString := ip.String() found := false @@ -174,7 +175,7 @@ func (a *IPPoolAllocator) updateIPAddressState(ipPool *v1alpha2.IPPool, ip net.I return fmt.Errorf("ip %s usage not found in pool %s", ipString, newPool.Name) } - _, err := a.crdClient.CrdV1alpha2().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + _, err := a.crdClient.CrdV1beta1().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) if err != nil { klog.Warningf("IP Pool %s update with status %+v failed: %+v", newPool.Name, newPool.Status, err) return err @@ -184,26 +185,26 @@ func (a *IPPoolAllocator) updateIPAddressState(ipPool *v1alpha2.IPPool, ip net.I } -func (a *IPPoolAllocator) appendPoolUsageForStatefulSet(ipPool *v1alpha2.IPPool, ips []net.IP, namespace, name string) error { +func (a *IPPoolAllocator) appendPoolUsageForStatefulSet(ipPool *v1beta1.IPPool, ips []net.IP, namespace, name string) error { newPool := ipPool.DeepCopy() for i, ip := range ips { - owner := v1alpha2.IPAddressOwner{ - StatefulSet: &v1alpha2.StatefulSetOwner{ + owner := v1beta1.IPAddressOwner{ + StatefulSet: &v1beta1.StatefulSetOwner{ Namespace: namespace, Name: name, Index: i, }, } - usageEntry := v1alpha2.IPAddressState{ + usageEntry := v1beta1.IPAddressState{ IPAddress: ip.String(), - Phase: v1alpha2.IPAddressPhaseReserved, + Phase: v1beta1.IPAddressPhaseReserved, Owner: owner, } newPool.Status.IPAddresses = append(newPool.Status.IPAddresses, usageEntry) } - _, err := a.crdClient.CrdV1alpha2().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + _, err := a.crdClient.CrdV1beta1().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) if err != nil { klog.Warningf("IP Pool %s update with status %+v failed: %+v", newPool.Name, newPool.Status, err) return err @@ -214,11 +215,11 @@ func (a *IPPoolAllocator) appendPoolUsageForStatefulSet(ipPool *v1alpha2.IPPool, } // removeIPAddressState updates ipPool status to delete released IP allocation, and keeps preallocation information -func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1alpha2.IPPool, ip net.IP) error { +func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1beta1.IPPool, ip net.IP) error { ipString := ip.String() newPool := ipPool.DeepCopy() - var newList []v1alpha2.IPAddressState + var newList []v1beta1.IPAddressState allocated := false for i := range ipPool.Status.IPAddresses { entry := ipPool.Status.IPAddresses[i] @@ -229,7 +230,7 @@ func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1alpha2.IPPool, ip net.I if entry.Owner.StatefulSet != nil { entry = *entry.DeepCopy() entry.Owner.Pod = nil - entry.Phase = v1alpha2.IPAddressPhaseReserved + entry.Phase = v1beta1.IPAddressPhaseReserved newList = append(newList, entry) } } @@ -242,7 +243,7 @@ func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1alpha2.IPPool, ip net.I newPool.Status.IPAddresses = newList a.updateUsage(newPool) - _, err := 
a.crdClient.CrdV1alpha2().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + _, err := a.crdClient.CrdV1beta1().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) if err != nil { klog.Warningf("IP Pool %s update failed: %+v", newPool.Name, err) return err @@ -254,7 +255,7 @@ func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1alpha2.IPPool, ip net.I // getExistingAllocation looks up the existing IP allocation for a Pod network interface, and // returns the IP address and SubnetInfo if found. -func (a *IPPoolAllocator) getExistingAllocation(podOwner *v1alpha2.PodOwner) (net.IP, *v1alpha2.SubnetInfo, error) { +func (a *IPPoolAllocator) getExistingAllocation(podOwner *v1beta1.PodOwner) (net.IP, *v1beta1.SubnetInfo, error) { ip, err := a.GetContainerIP(podOwner.ContainerID, podOwner.IFName) if err != nil { return nil, nil, err @@ -278,15 +279,15 @@ func (a *IPPoolAllocator) getExistingAllocation(podOwner *v1alpha2.PodOwner) (ne if index == -1 { return nil, nil, fmt.Errorf("IP %v does not belong to IPPool %s", ip, a.ipPoolName) } - return ip, &ipPool.Spec.IPRanges[index].SubnetInfo, nil + return ip, &ipPool.Spec.SubnetInfo, nil } // AllocateIP allocates the specified IP. It returns error if the IP is not in the range or already // allocated, or in case CRD failed to update its state. // In case of success, IP pool CRD status is updated with allocated IP/state/resource/container. // AllocateIP returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (*v1alpha2.SubnetInfo, error) { - var subnetSpec *v1alpha2.SubnetInfo +func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1beta1.IPAddressPhase, owner v1beta1.IPAddressOwner) (*v1beta1.SubnetInfo, error) { + var subnetInfo *v1beta1.SubnetInfo // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. err := retry.RetryOnConflict(retry.DefaultRetry, func() error { ipPool, allocators, err := a.getPoolAndInitIPAllocators() @@ -311,7 +312,7 @@ func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, o return fmt.Errorf("IP %v does not belong to IP pool %s", ip, a.ipPoolName) } - subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo + subnetInfo = &ipPool.Spec.SubnetInfo err = a.appendPoolUsage(ipPool, ip, state, owner) return err @@ -320,18 +321,18 @@ func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, o if err != nil { klog.Errorf("Failed to allocate IP address %s from pool %s: %+v", ip, a.ipPoolName, err) } - return subnetSpec, err + return subnetInfo, err } // AllocateNext allocates the next available IP. It returns error if pool is exausted, // or in case CRD failed to update its state. // In case of success, IPPool CRD status is updated with allocated IP/state/resource/container. // AllocateIP returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, *v1alpha2.SubnetInfo, error) { +func (a *IPPoolAllocator) AllocateNext(state v1beta1.IPAddressPhase, owner v1beta1.IPAddressOwner) (net.IP, *v1beta1.SubnetInfo, error) { podOwner := owner.Pod // Same resource can not ask for allocation twice without release. // This needs to be verified even at the expense of another API call. 
- ip, subnetSpec, err := a.getExistingAllocation(podOwner) + ip, subnetInfo, err := a.getExistingAllocation(podOwner) if err != nil { return nil, nil, err } @@ -340,7 +341,7 @@ func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1al // allocation failure, not all allocated IPs were successfully released, and then // CNI ADD is retried. klog.InfoS("Container already has an IP allocated", "container", podOwner.ContainerID, "interface", podOwner.IFName, "IPPool", a.ipPoolName) - return ip, subnetSpec, err + return ip, subnetInfo, err } // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. @@ -365,21 +366,21 @@ func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1al return fmt.Errorf("failed to allocate IP: Pool %s is exausted", a.ipPoolName) } - subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo + subnetInfo = &ipPool.Spec.SubnetInfo return a.appendPoolUsage(ipPool, ip, state, owner) }) if err != nil { klog.ErrorS(err, "Failed to allocate from IPPool", "IPPool", a.ipPoolName) } - return ip, subnetSpec, err + return ip, subnetInfo, err } // AllocateReservedOrNext allocates the reserved IP if it exists, else allocates next available IP. // It returns error if pool is exhausted, or in case it fails to update IPPool's state. In case of // success, IP pool status is updated with allocated IP/state/resource/container. // AllocateReservedOrNext returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, *v1alpha2.SubnetInfo, error) { +func (a *IPPoolAllocator) AllocateReservedOrNext(state v1beta1.IPAddressPhase, owner v1beta1.IPAddressOwner) (net.IP, *v1beta1.SubnetInfo, error) { ip, err := a.getReservedIP(owner) if err != nil { return nil, nil, err @@ -390,15 +391,15 @@ func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, } var prevIP net.IP - var subnetSpec *v1alpha2.SubnetInfo + var subnetInfo *v1beta1.SubnetInfo podOwner := owner.Pod - prevIP, subnetSpec, err = a.getExistingAllocation(podOwner) + prevIP, subnetInfo, err = a.getExistingAllocation(podOwner) if err != nil { return nil, nil, err } if prevIP != nil { klog.InfoS("Container already has an IP allocated", "container", podOwner.ContainerID, "interface", podOwner.IFName, "IPPool", a.ipPoolName) - return prevIP, subnetSpec, err + return prevIP, subnetInfo, err } // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. @@ -421,14 +422,14 @@ func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, return fmt.Errorf("IP %v does not belong to IPPool %s", ip, a.ipPoolName) } - subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo + subnetInfo = &ipPool.Spec.SubnetInfo return a.updateIPAddressState(ipPool, ip, state, owner) }) if err != nil { klog.ErrorS(err, "Failed to allocate IP address", "ip", ip, "IPPool", a.ipPoolName) } - return ip, subnetSpec, err + return ip, subnetInfo, err } // AllocateStatefulSet pre-allocates continuous range of IPs for StatefulSet. 
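
With `subnetInfo` lifted to the pool level, the allocator hunks above now return `&ipPool.Spec.SubnetInfo` from AllocateIP/AllocateNext/AllocateReservedOrNext and derive the pool's IP family from the gateway via `utilnet.IPFamilyOfString`. The sketch below shows one way a caller could combine an allocated IP with that pool-level subnetInfo to build an interface address; `ifaceAddr` and the local `subnetInfo` struct are illustrative stand-ins, not code from this diff.

```go
// iface_addr_sketch.go — assumed consumer-side helper, not part of Antrea.
package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/utils/net"
)

// subnetInfo mirrors the v1beta1 pool-level fields used in this diff.
type subnetInfo struct {
	Gateway      string
	PrefixLength int32
}

// ifaceAddr combines an allocated IP with the pool's prefix length, choosing a
// 32- or 128-bit mask based on the gateway's IP family, mirroring the
// validation logic in validateIPRange.
func ifaceAddr(ip net.IP, si subnetInfo) (*net.IPNet, error) {
	bits := 32
	if utilnet.IPFamilyOfString(si.Gateway) == utilnet.IPv6 {
		bits = 128
	}
	if int(si.PrefixLength) <= 0 || int(si.PrefixLength) >= bits {
		return nil, fmt.Errorf("invalid prefix length %d for %d-bit address", si.PrefixLength, bits)
	}
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(si.PrefixLength), bits)}, nil
}

func main() {
	addr, err := ifaceAddr(net.ParseIP("10.2.2.101"), subnetInfo{Gateway: "10.2.2.1", PrefixLength: 24})
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // 10.2.2.101/24
}
```
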
@@ -511,7 +512,7 @@ func (a *IPPoolAllocator) ReleaseStatefulSet(namespace, name string) error { return err } - var updatedAdresses []v1alpha2.IPAddressState + var updatedAdresses []v1beta1.IPAddressState for _, ip := range ipPool.Status.IPAddresses { if ip.Owner.StatefulSet == nil || ip.Owner.StatefulSet.Namespace != namespace || ip.Owner.StatefulSet.Name != name { updatedAdresses = append(updatedAdresses, ip) @@ -527,7 +528,7 @@ func (a *IPPoolAllocator) ReleaseStatefulSet(namespace, name string) error { newPool := ipPool.DeepCopy() newPool.Status.IPAddresses = updatedAdresses - _, err = a.crdClient.CrdV1alpha2().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + _, err = a.crdClient.CrdV1beta1().IPPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) if err != nil { klog.Warningf("IP Pool %s update failed: %+v", newPool.Name, err) return err @@ -607,7 +608,7 @@ func (a *IPPoolAllocator) GetContainerIP(containerID, ifName string) (net.IP, er } // getReservedIP checks whether an IP was reserved with specified owner. It returns error if the resource crd fails to be retrieved. -func (a *IPPoolAllocator) getReservedIP(reservedOwner v1alpha2.IPAddressOwner) (net.IP, error) { +func (a *IPPoolAllocator) getReservedIP(reservedOwner v1beta1.IPAddressOwner) (net.IP, error) { ipPool, err := a.getPool() if err != nil { return nil, err @@ -631,7 +632,7 @@ func (a IPPoolAllocator) Total() int { return allocators.Total() } -func (a *IPPoolAllocator) updateUsage(ipPool *v1alpha2.IPPool) { +func (a *IPPoolAllocator) updateUsage(ipPool *v1beta1.IPPool) { ipPool.Status.Usage.Total = a.Total() ipPool.Status.Usage.Used = len(ipPool.Status.IPAddresses) } diff --git a/pkg/ipam/poolallocator/allocator_test.go b/pkg/ipam/poolallocator/allocator_test.go index 5c53c5ce161..00c4cdfe901 100644 --- a/pkg/ipam/poolallocator/allocator_test.go +++ b/pkg/ipam/poolallocator/allocator_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - crdv1a2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1b1 "antrea.io/antrea/pkg/apis/crd/v1beta1" informers "antrea.io/antrea/pkg/client/informers/externalversions" fakepoolclient "antrea.io/antrea/pkg/ipam/poolallocator/testing" @@ -38,20 +38,20 @@ import ( var testNamespace = "test" -var fakePodOwner = crdv1a2.IPAddressOwner{ - Pod: &crdv1a2.PodOwner{ +var fakePodOwner = crdv1b1.IPAddressOwner{ + Pod: &crdv1b1.PodOwner{ Name: "fakePod", Namespace: testNamespace, ContainerID: uuid.New().String(), }, } -func newTestIPPoolAllocator(pool *crdv1a2.IPPool, stopCh <-chan struct{}) *IPPoolAllocator { +func newTestIPPoolAllocator(pool *crdv1b1.IPPool, stopCh <-chan struct{}) *IPPoolAllocator { crdClient := fakepoolclient.NewIPPoolClient() crdInformerFactory := informers.NewSharedInformerFactory(crdClient, 0) - pools := crdInformerFactory.Crd().V1alpha2().IPPools() + pools := crdInformerFactory.Crd().V1beta1().IPPools() poolInformer := pools.Informer() go crdInformerFactory.Start(stopCh) @@ -71,18 +71,18 @@ func newTestIPPoolAllocator(pool *crdv1a2.IPPool, stopCh <-chan struct{}) *IPPoo return allocator } -func validateAllocationSequence(t *testing.T, allocator *IPPoolAllocator, subnetInfo crdv1a2.SubnetInfo, ipList []string) { +func validateAllocationSequence(t *testing.T, allocator *IPPoolAllocator, subnetInfo crdv1b1.SubnetInfo, ipList []string) { i := 1 for _, expectedIP := range ipList { klog.Info("Validating allocation for ", expectedIP) - owner := crdv1a2.IPAddressOwner{ - Pod: 
&crdv1a2.PodOwner{ + owner := crdv1b1.IPAddressOwner{ + Pod: &crdv1b1.PodOwner{ Name: fmt.Sprintf("fakePod%d", i), Namespace: testNamespace, ContainerID: uuid.New().String(), }, } - ip, returnInfo, err := allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, owner) + ip, returnInfo, err := allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, owner) require.NoError(t, err) assert.Equal(t, net.ParseIP(expectedIP), ip) assert.Equal(t, subnetInfo, *returnInfo) @@ -95,21 +95,19 @@ func TestAllocateIP(t *testing.T) { defer close(stopCh) poolName := uuid.New().String() - ipRange := crdv1a2.IPRange{ + ipRange := crdv1b1.IPRange{ Start: "10.2.2.100", End: "10.2.2.120", } - subnetInfo := crdv1a2.SubnetInfo{ + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "10.2.2.1", PrefixLength: 24, VLAN: 100, } - subnetRange := crdv1a2.SubnetIPRange{IPRange: ipRange, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{IPRanges: []crdv1a2.SubnetIPRange{subnetRange}}, + Spec: crdv1b1.IPPoolSpec{IPRanges: []crdv1b1.IPRange{ipRange}, SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -117,19 +115,19 @@ func TestAllocateIP(t *testing.T) { assert.Equal(t, 21, allocator.Total()) // Allocate specific IP from the range - returnInfo, err := allocator.AllocateIP(net.ParseIP("10.2.2.101"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) + returnInfo, err := allocator.AllocateIP(net.ParseIP("10.2.2.101"), crdv1b1.IPAddressPhaseAllocated, fakePodOwner) assert.Equal(t, subnetInfo, *returnInfo) require.NoError(t, err) // Validate IP outside the range is not allocated - _, err = allocator.AllocateIP(net.ParseIP("10.2.2.121"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) + _, err = allocator.AllocateIP(net.ParseIP("10.2.2.121"), crdv1b1.IPAddressPhaseAllocated, fakePodOwner) require.Error(t, err) // Make sure IP allocated above is not allocated again validateAllocationSequence(t, allocator, subnetInfo, []string{"10.2.2.100", "10.2.2.102"}) // Validate error is returned if IP is already allocated - _, err = allocator.AllocateIP(net.ParseIP("10.2.2.102"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) + _, err = allocator.AllocateIP(net.ParseIP("10.2.2.102"), crdv1b1.IPAddressPhaseAllocated, fakePodOwner) require.Error(t, err) } @@ -138,20 +136,18 @@ func TestAllocateNext(t *testing.T) { defer close(stopCh) poolName := "fakePool" - ipRange := crdv1a2.IPRange{ + ipRange := crdv1b1.IPRange{ Start: "10.2.2.100", End: "10.2.2.120", } - subnetInfo := crdv1a2.SubnetInfo{ + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "10.2.2.1", PrefixLength: 24, } - subnetRange := crdv1a2.SubnetIPRange{IPRange: ipRange, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{IPRanges: []crdv1a2.SubnetIPRange{subnetRange}}, + Spec: crdv1b1.IPPoolSpec{IPRanges: []crdv1b1.IPRange{ipRange}, SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -166,24 +162,20 @@ func TestAllocateNextMultiRange(t *testing.T) { defer close(stopCh) poolName := uuid.New().String() - ipRange1 := crdv1a2.IPRange{ + ipRange1 := crdv1b1.IPRange{ Start: "10.2.2.100", End: "10.2.2.101", } - ipRange2 := crdv1a2.IPRange{CIDR: "10.2.2.0/28"} - subnetInfo := crdv1a2.SubnetInfo{ + ipRange2 := crdv1b1.IPRange{CIDR: "10.2.2.0/28"} + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "10.2.2.1", PrefixLength: 24, } - subnetRange1 := 
crdv1a2.SubnetIPRange{IPRange: ipRange1, - SubnetInfo: subnetInfo} - subnetRange2 := crdv1a2.SubnetIPRange{IPRange: ipRange2, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{ - IPRanges: []crdv1a2.SubnetIPRange{subnetRange1, subnetRange2}}, + Spec: crdv1b1.IPPoolSpec{ + IPRanges: []crdv1b1.IPRange{ipRange1, ipRange2}, SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -199,27 +191,23 @@ func TestAllocateNextMultiRangeExausted(t *testing.T) { defer close(stopCh) poolName := uuid.New().String() - ipRange1 := crdv1a2.IPRange{ + ipRange1 := crdv1b1.IPRange{ Start: "10.2.2.100", End: "10.2.2.101", } - ipRange2 := crdv1a2.IPRange{ + ipRange2 := crdv1b1.IPRange{ Start: "10.2.2.200", End: "10.2.2.200", } - subnetInfo := crdv1a2.SubnetInfo{ + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "10.2.2.1", PrefixLength: 24, } - subnetRange1 := crdv1a2.SubnetIPRange{IPRange: ipRange1, - SubnetInfo: subnetInfo} - subnetRange2 := crdv1a2.SubnetIPRange{IPRange: ipRange2, - SubnetInfo: subnetInfo} - - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{ - IPRanges: []crdv1a2.SubnetIPRange{subnetRange1, subnetRange2}}, + Spec: crdv1b1.IPPoolSpec{ + IPRanges: []crdv1b1.IPRange{ipRange1, ipRange2}, + SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -230,7 +218,7 @@ func TestAllocateNextMultiRangeExausted(t *testing.T) { validateAllocationSequence(t, allocator, subnetInfo, []string{"10.2.2.100", "10.2.2.101", "10.2.2.200"}) // Allocate next IP and get error - _, _, err := allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, fakePodOwner) + _, _, err := allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, fakePodOwner) require.Error(t, err) } @@ -239,24 +227,21 @@ func TestAllocateReleaseSequence(t *testing.T) { defer close(stopCh) poolName := uuid.New().String() - ipRange1 := crdv1a2.IPRange{ + ipRange1 := crdv1b1.IPRange{ Start: "2001::1000", End: "2001::1000", } - ipRange2 := crdv1a2.IPRange{CIDR: "2001::0/124"} - subnetInfo := crdv1a2.SubnetInfo{ + ipRange2 := crdv1b1.IPRange{CIDR: "2001::0/124"} + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "2001::1", PrefixLength: 64, } - subnetRange1 := crdv1a2.SubnetIPRange{IPRange: ipRange1, - SubnetInfo: subnetInfo} - subnetRange2 := crdv1a2.SubnetIPRange{IPRange: ipRange2, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{ - IPRanges: []crdv1a2.SubnetIPRange{subnetRange1, subnetRange2}}, + Spec: crdv1b1.IPPoolSpec{ + IPRanges: []crdv1b1.IPRange{ipRange1, ipRange2}, + SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -307,24 +292,21 @@ func TestReleaseResource(t *testing.T) { defer close(stopCh) poolName := uuid.New().String() - ipRange1 := crdv1a2.IPRange{ + ipRange1 := crdv1b1.IPRange{ Start: "2001::1000", End: "2001::1000", } - ipRange2 := crdv1a2.IPRange{CIDR: "2001::0/124"} - subnetInfo := crdv1a2.SubnetInfo{ + ipRange2 := crdv1b1.IPRange{CIDR: "2001::0/124"} + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "2001::1", PrefixLength: 64, } - subnetRange1 := crdv1a2.SubnetIPRange{IPRange: ipRange1, - SubnetInfo: subnetInfo} - subnetRange2 := crdv1a2.SubnetIPRange{IPRange: ipRange2, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - 
Spec: crdv1a2.IPPoolSpec{ - IPRanges: []crdv1a2.SubnetIPRange{subnetRange1, subnetRange2}}, + Spec: crdv1b1.IPPoolSpec{ + IPRanges: []crdv1b1.IPRange{ipRange1, ipRange2}, + SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) @@ -347,8 +329,8 @@ func TestHas(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - owner := crdv1a2.IPAddressOwner{ - Pod: &crdv1a2.PodOwner{ + owner := crdv1b1.IPAddressOwner{ + Pod: &crdv1b1.PodOwner{ Name: "fakePod", Namespace: testNamespace, ContainerID: "fakeContainer", @@ -356,27 +338,25 @@ func TestHas(t *testing.T) { }, } poolName := uuid.New().String() - ipRange1 := crdv1a2.IPRange{ + ipRange1 := crdv1b1.IPRange{ Start: "2001::1000", End: "2001::1000", } - subnetInfo := crdv1a2.SubnetInfo{ + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "2001::1", PrefixLength: 64, } - subnetRange1 := crdv1a2.SubnetIPRange{IPRange: ipRange1, - SubnetInfo: subnetInfo} - - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{ - IPRanges: []crdv1a2.SubnetIPRange{subnetRange1}}, + Spec: crdv1b1.IPPoolSpec{ + IPRanges: []crdv1b1.IPRange{ipRange1}, + SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) require.NotNil(t, allocator) - _, _, err := allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, owner) + _, _, err := allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, owner) require.NoError(t, err) require.Eventually(t, func() bool { has, _ := allocator.hasPod(testNamespace, "fakePod") @@ -404,20 +384,18 @@ func TestAllocateReleaseStatefulSet(t *testing.T) { poolName := uuid.New().String() setName := "fakeSet" - ipRange := crdv1a2.IPRange{ + ipRange := crdv1b1.IPRange{ Start: "10.2.2.100", End: "10.2.2.120", } - subnetInfo := crdv1a2.SubnetInfo{ + subnetInfo := crdv1b1.SubnetInfo{ Gateway: "10.2.2.1", PrefixLength: 24, } - subnetRange := crdv1a2.SubnetIPRange{IPRange: ipRange, - SubnetInfo: subnetInfo} - pool := crdv1a2.IPPool{ + pool := crdv1b1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: poolName}, - Spec: crdv1a2.IPPoolSpec{IPRanges: []crdv1a2.SubnetIPRange{subnetRange}}, + Spec: crdv1b1.IPPoolSpec{IPRanges: []crdv1b1.IPRange{ipRange}, SubnetInfo: subnetInfo}, } allocator := newTestIPPoolAllocator(&pool, stopCh) diff --git a/pkg/ipam/poolallocator/testing/fake_client.go b/pkg/ipam/poolallocator/testing/fake_client.go index 2333d56a646..ca81cad8667 100644 --- a/pkg/ipam/poolallocator/testing/fake_client.go +++ b/pkg/ipam/poolallocator/testing/fake_client.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/watch" k8stesting "k8s.io/client-go/testing" - crdv1a2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1b1 "antrea.io/antrea/pkg/apis/crd/v1beta1" fakeversioned "antrea.io/antrea/pkg/client/clientset/versioned/fake" ) @@ -41,7 +41,7 @@ type IPPoolClientset struct { watcher *watch.RaceFreeFakeWatcher } -func (c *IPPoolClientset) InitPool(pool *crdv1a2.IPPool) { +func (c *IPPoolClientset) InitPool(pool *crdv1b1.IPPool) { pool.ResourceVersion = uuid.New().String() c.poolVersion.Store(pool.Name, pool.ResourceVersion) @@ -54,7 +54,7 @@ func NewIPPoolClient() *IPPoolClientset { poolVersion: sync.Map{}} crdClient.AddReactor("update", "ippools", func(action k8stesting.Action) (bool, runtime.Object, error) { - updatedPool := action.(k8stesting.UpdateAction).GetObject().(*crdv1a2.IPPool) + updatedPool := action.(k8stesting.UpdateAction).GetObject().(*crdv1b1.IPPool) obj, exists := crdClient.poolVersion.Load(updatedPool.Name) if !exists { return 
false, nil, nil diff --git a/test/e2e-secondary-network/infra/secondary-networks.yml b/test/e2e-secondary-network/infra/secondary-networks.yml index 5495dd18394..6610f2b8749 100644 --- a/test/e2e-secondary-network/infra/secondary-networks.yml +++ b/test/e2e-secondary-network/infra/secondary-networks.yml @@ -1,50 +1,50 @@ --- -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: secnet-ipv4-1 spec: - ipVersion: 4 ipRanges: - cidr: "148.14.24.0/24" + subnetInfo: gateway: "148.14.24.1" prefixLength: 24 --- -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: secnet-ipv4-2 spec: - ipVersion: 4 ipRanges: - start: "148.14.25.111" end: "148.14.25.123" + subnetInfo: gateway: "148.14.25.1" prefixLength: 24 --- -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: secnet-ipv4-3 spec: - ipVersion: 4 ipRanges: - cidr: "148.14.26.0/24" + subnetInfo: gateway: "148.14.26.1" prefixLength: 24 vlan: 300 --- -apiVersion: "crd.antrea.io/v1alpha2" +apiVersion: "crd.antrea.io/v1beta1" kind: IPPool metadata: name: secnet-ipv6-3 spec: - ipVersion: 6 ipRanges: - cidr: "10:2400::0/96" + subnetInfo: gateway: "10:2400::1" prefixLength: 64 diff --git a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go index db5fc8f2a9e..be2e35e5d28 100644 --- a/test/e2e/antreaipam_test.go +++ b/test/e2e/antreaipam_test.go @@ -33,79 +33,124 @@ import ( utilnet "k8s.io/utils/net" crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" annotation "antrea.io/antrea/pkg/ipam" ) var ( - subnetIPv4RangesMap = map[string]crdv1alpha2.IPPool{ + subnetIPv4RangesMap = map[string]crdv1beta1.IPPool{ testAntreaIPAMNamespace: { ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv4-0", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: crdv1alpha2.IPv4, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - CIDR: "", - Start: "192.168.240.100", - End: "192.168.240.129", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "", + Start: "192.168.240.100", + End: "192.168.240.129", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "192.168.240.1", + PrefixLength: 24, }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.240.1", - PrefixLength: 24, - }}}, }, }, "1": { ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv4-1", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: crdv1alpha2.IPv4, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - CIDR: "", - Start: "192.168.240.130", - End: "192.168.240.139", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "", + Start: "192.168.240.130", + End: "192.168.240.139", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "192.168.240.1", + PrefixLength: 24, }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.240.1", - PrefixLength: 24, - }}}, }, }, testAntreaIPAMNamespace11: { ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv4-11", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: 4, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - CIDR: "", - Start: "192.168.241.100", - End: "192.168.241.129", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "", + Start: "192.168.241.100", + End: "192.168.241.129", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "192.168.241.1", + PrefixLength: 24, + VLAN: 11, }, - SubnetInfo: 
crdv1alpha2.SubnetInfo{ - Gateway: "192.168.241.1", - PrefixLength: 24, - VLAN: 11, - }}}, }, }, testAntreaIPAMNamespace12: { ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv4-12", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: 4, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - CIDR: "", - Start: "192.168.242.100", - End: "192.168.242.129", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "", + Start: "192.168.242.100", + End: "192.168.242.129", + }, }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "192.168.242.1", + PrefixLength: 24, + VLAN: 12, + }, + }, + }, + } + + v1a1Pool = crdv1alpha2.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v1alpha1", + }, + Spec: crdv1alpha2.IPPoolSpec{ + IPVersion: crdv1alpha2.IPv4, + IPRanges: []crdv1alpha2.SubnetIPRange{ + { + IPRange: crdv1alpha2.IPRange{ + Start: "10.2.0.12", + End: "10.2.0.20", + }, SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "192.168.242.1", + Gateway: "10.2.0.1", PrefixLength: 24, - VLAN: 12, - }}}, + VLAN: 2, + }, + }, + }, + }, + } + + v1b1Pool = crdv1beta1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v1beta1", + }, + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "10.10.1.1/26", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "10.10.1.1", + PrefixLength: 24, + VLAN: 2, }, }, } @@ -149,6 +194,10 @@ func TestAntreaIPAM(t *testing.T) { defer deleteIPPoolWrapper(t, data, ipPool.Name) ipPools = append(ipPools, ipPool.Name) + t.Run("testIPPoolConversion", func(t *testing.T) { + testIPPoolConversion(t, data) + }) + // connectivity test with antrea redeploy t.Run("testAntreaIPAMPodConnectivityAfterAntreaRestart", func(t *testing.T) { skipIfHasWindowsNodes(t) @@ -259,6 +308,32 @@ func TestAntreaIPAM(t *testing.T) { }) } +func testIPPoolConversion(t *testing.T, data *TestData) { + _, err := data.crdClient.CrdV1alpha2().IPPools().Create(context.TODO(), &v1a1Pool, metav1.CreateOptions{}) + assert.NoError(t, err, "failed to create v1alpha2 IPPool") + defer deleteIPPoolWrapper(t, data, v1a1Pool.Name) + v1beta1Pool, err := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), v1a1Pool.Name, metav1.GetOptions{}) + assert.NoError(t, err, "failed to get v1beta1 IPPool") + assert.Equal(t, v1a1Pool.Name, v1beta1Pool.Name) + assert.Equal(t, v1a1Pool.Spec.IPRanges[0].Start, v1beta1Pool.Spec.IPRanges[0].Start) + assert.Equal(t, v1a1Pool.Spec.IPRanges[0].End, v1beta1Pool.Spec.IPRanges[0].End) + assert.Equal(t, v1a1Pool.Spec.IPRanges[0].Gateway, v1beta1Pool.Spec.SubnetInfo.Gateway) + assert.Equal(t, v1a1Pool.Spec.IPRanges[0].PrefixLength, v1beta1Pool.Spec.SubnetInfo.PrefixLength) + assert.Equal(t, int32(v1a1Pool.Spec.IPRanges[0].VLAN), v1beta1Pool.Spec.SubnetInfo.VLAN) + + _, err = data.crdClient.CrdV1beta1().IPPools().Create(context.TODO(), &v1b1Pool, metav1.CreateOptions{}) + defer deleteIPPoolWrapper(t, data, v1b1Pool.Name) + assert.NoError(t, err, "failed to create v1beta1 IPPool") + v1alpha2Pool, err := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), v1b1Pool.Name, metav1.GetOptions{}) + assert.NoError(t, err, "failed to get v1alpha2 IPPool") + assert.Equal(t, v1b1Pool.Name, v1alpha2Pool.Name) + assert.Equal(t, v1b1Pool.Spec.IPRanges[0].CIDR, v1alpha2Pool.Spec.IPRanges[0].CIDR) + assert.Equal(t, v1b1Pool.Spec.SubnetInfo.Gateway, v1alpha2Pool.Spec.IPRanges[0].Gateway) + assert.Equal(t, v1b1Pool.Spec.SubnetInfo.PrefixLength, v1alpha2Pool.Spec.IPRanges[0].PrefixLength) + assert.Equal(t, 
v1b1Pool.Spec.SubnetInfo.VLAN, int32(v1alpha2Pool.Spec.IPRanges[0].VLAN)) + +} + func testAntreaIPAMPodConnectivitySameNode(t *testing.T, data *TestData) { numPods := 2 // Two AntreaIPAM Pods, can be increased PodInfos := make([]PodInfo, numPods) @@ -391,11 +466,11 @@ func testAntreaIPAMStatefulSet(t *testing.T, data *TestData, dedicatedIPPoolKey } expectedPodIP = utilnet.AddIPOffset(utilnet.BigForIP(net.ParseIP(startIPString)), offset) assert.True(t, isBelongTo) - assert.True(t, reflect.DeepEqual(ipAddressState, &crdv1alpha2.IPAddressState{ + assert.True(t, reflect.DeepEqual(ipAddressState, &crdv1beta1.IPAddressState{ IPAddress: expectedPodIP.String(), - Phase: crdv1alpha2.IPAddressPhaseAllocated, - Owner: crdv1alpha2.IPAddressOwner{ - Pod: &crdv1alpha2.PodOwner{ + Phase: crdv1beta1.IPAddressPhaseAllocated, + Owner: crdv1beta1.IPAddressOwner{ + Pod: &crdv1beta1.PodOwner{ Name: podName, Namespace: testAntreaIPAMNamespace, ContainerID: ipAddressState.Owner.Pod.ContainerID, @@ -434,20 +509,20 @@ func testAntreaIPAMStatefulSet(t *testing.T, data *TestData, dedicatedIPPoolKey } func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string, namespace string, ipPoolName string, ipOffsets, reservedIPOffsets []int32) { - ipPool, err := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) + ipPool, err := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) if err != nil { tb.Fatalf("Failed to get IPPool %s, err: %+v", ipPoolName, err) } startIP := net.ParseIP(ipPool.Spec.IPRanges[0].Start) - expectedIPAddressMap := map[string]*crdv1alpha2.IPAddressState{} + expectedIPAddressMap := map[string]*crdv1beta1.IPAddressState{} for i, offset := range ipOffsets { ipString := utilnet.AddIPOffset(utilnet.BigForIP(startIP), int(offset)).String() podName := fmt.Sprintf("%s-%d", name, i) - expectedIPAddressMap[ipString] = &crdv1alpha2.IPAddressState{ + expectedIPAddressMap[ipString] = &crdv1beta1.IPAddressState{ IPAddress: ipString, - Phase: crdv1alpha2.IPAddressPhaseAllocated, - Owner: crdv1alpha2.IPAddressOwner{ - Pod: &crdv1alpha2.PodOwner{ + Phase: crdv1beta1.IPAddressPhaseAllocated, + Owner: crdv1beta1.IPAddressOwner{ + Pod: &crdv1beta1.PodOwner{ Name: podName, Namespace: namespace, ContainerID: "", @@ -457,7 +532,7 @@ func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string } for i, offset := range reservedIPOffsets { ipString := utilnet.AddIPOffset(utilnet.BigForIP(startIP), int(offset)).String() - stsOwner := &crdv1alpha2.StatefulSetOwner{ + stsOwner := &crdv1beta1.StatefulSetOwner{ Name: name, Namespace: namespace, Index: i, @@ -465,10 +540,10 @@ func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string if _, ok := expectedIPAddressMap[ipString]; ok { expectedIPAddressMap[ipString].Owner.StatefulSet = stsOwner } else { - expectedIPAddressMap[ipString] = &crdv1alpha2.IPAddressState{ + expectedIPAddressMap[ipString] = &crdv1beta1.IPAddressState{ IPAddress: ipString, - Phase: crdv1alpha2.IPAddressPhaseReserved, - Owner: crdv1alpha2.IPAddressOwner{ + Phase: crdv1beta1.IPAddressPhaseReserved, + Owner: crdv1beta1.IPAddressOwner{ StatefulSet: stsOwner, }, } @@ -478,11 +553,11 @@ func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string tb.Logf("expectedIPAddressMap: %s", expectedIPAddressJson) err = wait.PollUntilContextTimeout(context.Background(), time.Second*3, time.Second*15, false, func(ctx context.Context) (bool, error) { - ipPool, err := 
data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) + ipPool, err := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) if err != nil { tb.Fatalf("Failed to get IPPool %s, err: %+v", ipPoolName, err) } - actualIPAddressMap := map[string]*crdv1alpha2.IPAddressState{} + actualIPAddressMap := map[string]*crdv1beta1.IPAddressState{} actualIPAddressLoop: for i, ipAddress := range ipPool.Status.IPAddresses { for expectedIP := range expectedIPAddressMap { @@ -520,14 +595,14 @@ func deleteAntreaIPAMNamespace(tb testing.TB, data *TestData, namespace string) } } -func createIPPool(tb testing.TB, data *TestData, key string) (*crdv1alpha2.IPPool, error) { +func createIPPool(tb testing.TB, data *TestData, key string) (*crdv1beta1.IPPool, error) { ipv4IPPool := subnetIPv4RangesMap[key] tb.Logf("Creating IPPool '%s'", ipv4IPPool.Name) - return data.crdClient.CrdV1alpha2().IPPools().Create(context.TODO(), &ipv4IPPool, metav1.CreateOptions{}) + return data.crdClient.CrdV1beta1().IPPools().Create(context.TODO(), &ipv4IPPool, metav1.CreateOptions{}) } -func checkIPPoolAllocation(tb testing.TB, data *TestData, ipPoolName, podIPString string) (isBelongTo bool, ipAddressState *crdv1alpha2.IPAddressState, err error) { - ipPool, err := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) +func checkIPPoolAllocation(tb testing.TB, data *TestData, ipPoolName, podIPString string) (isBelongTo bool, ipAddressState *crdv1beta1.IPAddressState, err error) { + ipPool, err := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) if err != nil { return } @@ -561,8 +636,8 @@ func checkIPPoolAllocation(tb testing.TB, data *TestData, ipPoolName, podIPStrin func deleteIPPoolWrapper(tb testing.TB, data *TestData, name string) { tb.Logf("Deleting IPPool '%s'", name) for i := 0; i < 10; i++ { - if err := data.crdClient.CrdV1alpha2().IPPools().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { - ipPool, _ := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), name, metav1.GetOptions{}) + if err := data.crdClient.CrdV1beta1().IPPools().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { + ipPool, _ := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), name, metav1.GetOptions{}) ipPoolJson, _ := json.Marshal(ipPool) tb.Logf("Error when deleting IPPool, err: %v, data: %s", err, ipPoolJson) time.Sleep(defaultInterval) @@ -576,7 +651,7 @@ func checkIPPoolsEmpty(tb testing.TB, data *TestData, names []string) { count := 0 err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, defaultTimeout, true, func(ctx context.Context) (bool, error) { for _, name := range names { - ipPool, _ := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), name, metav1.GetOptions{}) + ipPool, _ := data.crdClient.CrdV1beta1().IPPools().Get(context.TODO(), name, metav1.GetOptions{}) if len(ipPool.Status.IPAddresses) > 0 { ipPoolJson, _ := json.Marshal(ipPool) if count > 20 { diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index 13778aa70e9..eb176d10a26 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -35,7 +35,7 @@ import ( "antrea.io/antrea/pkg/agent/apis" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/openflow/cookie" - crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" "antrea.io/antrea/pkg/clusteridentity" ) @@ -197,7 +197,7 @@ func 
(data *TestData) testDeletePod(t *testing.T, podName string, nodeName strin if err != nil { t.Fatalf("Cannot check IPPool allocation: %v", err) } - return err == nil && ipAddressState != nil && ipAddressState.Phase == crdv1alpha2.IPAddressPhaseAllocated + return err == nil && ipAddressState != nil && ipAddressState.Phase == crdv1beta1.IPAddressPhaseAllocated } } diff --git a/test/e2e/secondary_network_ipam_test.go b/test/e2e/secondary_network_ipam_test.go index 43d0f1ad9aa..41ba3ce0292 100644 --- a/test/e2e/secondary_network_ipam_test.go +++ b/test/e2e/secondary_network_ipam_test.go @@ -21,41 +21,43 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "antrea.io/antrea/pkg/agent/config" - crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" + crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" "antrea.io/antrea/pkg/features" ) var ( - testIPPoolv4 = &crdv1alpha2.IPPool{ + testIPPoolv4 = &crdv1beta1.IPPool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv4", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: crdv1alpha2.IPv4, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - CIDR: "10.123.1.0/24", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + CIDR: "10.123.1.0/24", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "10.123.1.254", + PrefixLength: 24, }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "10.123.1.254", - PrefixLength: 24, - }}}, }, } - testIPPoolv6 = &crdv1alpha2.IPPool{ + testIPPoolv6 = &crdv1beta1.IPPool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ippool-ipv6", }, - Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: crdv1alpha2.IPv6, - IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ - Start: "3ffe:ffff:1:01ff::0101", - End: "3ffe:ffff:1:01ff::0200", + Spec: crdv1beta1.IPPoolSpec{ + IPRanges: []crdv1beta1.IPRange{ + { + Start: "3ffe:ffff:1:01ff::0101", + End: "3ffe:ffff:1:01ff::0200", + }, + }, + SubnetInfo: crdv1beta1.SubnetInfo{ + Gateway: "3ffe:ffff:1:01ff::1", + PrefixLength: 64, }, - SubnetInfo: crdv1alpha2.SubnetInfo{ - Gateway: "3ffe:ffff:1:01ff::1", - PrefixLength: 64, - }}}, }, } @@ -252,12 +254,12 @@ func TestSecondaryNetworkIPAM(t *testing.T) { skipIfProxyDisabled(t, data) skipIfEncapModeIsNot(t, data, config.TrafficEncapModeEncap) - _, err = data.crdClient.CrdV1alpha2().IPPools().Create(context.TODO(), testIPPoolv4, metav1.CreateOptions{}) + _, err = data.crdClient.CrdV1beta1().IPPools().Create(context.TODO(), testIPPoolv4, metav1.CreateOptions{}) defer deleteIPPoolWrapper(t, data, testIPPoolv4.Name) if err != nil { t.Fatalf("Failed to create v4 IPPool CR: %v", err) } - _, err = data.crdClient.CrdV1alpha2().IPPools().Create(context.TODO(), testIPPoolv6, metav1.CreateOptions{}) + _, err = data.crdClient.CrdV1beta1().IPPools().Create(context.TODO(), testIPPoolv6, metav1.CreateOptions{}) defer deleteIPPoolWrapper(t, data, testIPPoolv6.Name) if err != nil { t.Fatalf("Failed to create v6 IPPool CR: %v", err)
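
Note: every test rewrite above follows the same shape change — crd.antrea.io/v1beta1 drops the per-range SubnetIPRange wrapper (and the explicit IPVersion) and carries a single subnetInfo for the whole pool. Below is a minimal Go sketch of the two shapes, using only field names that appear in this diff; the package name, pool name, and addresses are illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2"
	crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
)

// v1alpha2: SubnetInfo is nested inside every SubnetIPRange and the IP version
// is spelled out explicitly.
var oldPool = crdv1alpha2.IPPool{
	ObjectMeta: metav1.ObjectMeta{Name: "example-pool"},
	Spec: crdv1alpha2.IPPoolSpec{
		IPVersion: crdv1alpha2.IPv4,
		IPRanges: []crdv1alpha2.SubnetIPRange{{
			IPRange:    crdv1alpha2.IPRange{Start: "10.2.2.100", End: "10.2.2.120"},
			SubnetInfo: crdv1alpha2.SubnetInfo{Gateway: "10.2.2.1", PrefixLength: 24},
		}},
	},
}

// v1beta1: plain IPRange entries plus one pool-level SubnetInfo; the IP version
// is implied by the addresses themselves.
var newPool = crdv1beta1.IPPool{
	ObjectMeta: metav1.ObjectMeta{Name: "example-pool"},
	Spec: crdv1beta1.IPPoolSpec{
		IPRanges:   []crdv1beta1.IPRange{{Start: "10.2.2.100", End: "10.2.2.120"}},
		SubnetInfo: crdv1beta1.SubnetInfo{Gateway: "10.2.2.1", PrefixLength: 24},
	},
}

This is why every test that previously built a []SubnetIPRange now builds a []IPRange plus one SubnetInfo.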
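
Note: testIPPoolConversion relies on the conversion webhook registered at /convert/ippool to round-trip objects between the two versions. The helper below is a hypothetical sketch of the upgrade direction only (v1alpha2 to v1beta1), not the controller's actual converter; it assumes, as the test pools do, that all ranges in a v1alpha2 pool share one subnet, so the first range's SubnetInfo can be promoted to the spec level.

package example

import (
	crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2"
	crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
)

// convertToV1beta1 is a hypothetical helper, not the webhook's implementation.
// It copies each range as a plain IPRange and promotes the first range's
// SubnetInfo to the pool level, which matches what the round-trip assertions
// in testIPPoolConversion expect.
func convertToV1beta1(in *crdv1alpha2.IPPool) *crdv1beta1.IPPool {
	out := &crdv1beta1.IPPool{ObjectMeta: *in.ObjectMeta.DeepCopy()}
	for _, r := range in.Spec.IPRanges {
		out.Spec.IPRanges = append(out.Spec.IPRanges, crdv1beta1.IPRange{
			CIDR:  r.CIDR,
			Start: r.Start,
			End:   r.End,
		})
	}
	if len(in.Spec.IPRanges) > 0 {
		out.Spec.SubnetInfo = crdv1beta1.SubnetInfo{
			Gateway:      in.Spec.IPRanges[0].Gateway,
			PrefixLength: in.Spec.IPRanges[0].PrefixLength,
			// The test casts the v1alpha2 VLAN to int32 when comparing, so do the same here.
			VLAN: int32(in.Spec.IPRanges[0].VLAN),
		}
	}
	return out
}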
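
Note: checkStatefulSetIPPoolAllocation builds its expected addresses by adding per-replica offsets to the first range's Start, using the k8s.io/utils/net helpers already imported by the e2e test. A small, self-contained sketch of that arithmetic follows; the function name expectedIPs is illustrative.

package example

import (
	"net"

	utilnet "k8s.io/utils/net"
)

// expectedIPs mirrors how checkStatefulSetIPPoolAllocation derives expected
// Pod IPs: each offset is added to the range's Start address. With start
// "10.2.2.100" and offsets 0, 1, 2 it returns 10.2.2.100 through 10.2.2.102.
func expectedIPs(start string, offsets []int32) []string {
	base := utilnet.BigForIP(net.ParseIP(start))
	ips := make([]string, 0, len(offsets))
	for _, off := range offsets {
		// AddIPOffset returns a new net.IP and does not modify base.
		ips = append(ips, utilnet.AddIPOffset(base, int(off)).String())
	}
	return ips
}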