From a51e7dd07d660fee6af3f8a5dfac4ceb34df40c0 Mon Sep 17 00:00:00 2001
From: Boris
Date: Tue, 11 Feb 2025 14:37:58 +0300
Subject: [PATCH] refactor IP stack (#11953)

---
 .gitlab-ci/vagrant.yml                        | 16 ++++-
 Vagrantfile                                   |  8 ++-
 contrib/terraform/terraform.py                |  1 +
 docs/ansible/vars.md                          | 39 ++++++++++--
 .../group_vars/k8s_cluster/k8s-cluster.yml    | 15 ++---
 .../group_vars/k8s_cluster/k8s-net-calico.yml |  2 +-
 .../containerd/defaults/main.yml              |  2 +-
 .../templates/cri-dockerd.service.j2          |  2 +-
 roles/etcd/handlers/main.yml                  |  4 +-
 roles/etcd/tasks/configure.yml                |  4 +-
 roles/etcd/tasks/join_etcd-events_member.yml  |  2 +-
 roles/etcd/tasks/join_etcd_member.yml         |  2 +-
 roles/etcd/templates/etcd-events.env.j2       |  4 +-
 roles/etcd/templates/etcd.env.j2              |  6 +-
 roles/etcd/templates/openssl.conf.j2          | 11 +++-
 roles/kubernetes/client/tasks/main.yml        |  2 +-
 .../defaults/main/kube-scheduler.yml          |  2 +-
 .../control-plane/defaults/main/main.yml      |  6 +-
 .../control-plane/handlers/main.yml           |  4 +-
 .../control-plane/tasks/kubeadm-secondary.yml |  6 +-
 .../control-plane/tasks/kubeadm-setup.yml     |  7 ++-
 .../control-plane/tasks/kubeadm-upgrade.yml   |  2 +-
 .../templates/kubeadm-config.v1beta3.yaml.j2  | 40 ++++++-------
 .../templates/kubeadm-config.v1beta4.yaml.j2  | 35 ++++++-----
 .../templates/kubeadm-controlplane.yaml.j2    |  4 +-
 roles/kubernetes/kubeadm/tasks/main.yml       |  2 +-
 .../kubeadm/templates/kubeadm-client.conf.j2  |  6 +-
 roles/kubernetes/node/defaults/main.yml       | 14 +++--
 roles/kubernetes/node/tasks/main.yml          |  4 +-
 .../templates/kubelet-config.v1beta1.yaml.j2  |  6 +-
 .../templates/loadbalancer/haproxy.cfg.j2     |  6 +-
 .../node/templates/loadbalancer/nginx.conf.j2 |  6 +-
 .../node/templates/node-kubeconfig.yaml.j2    |  2 +-
 .../preinstall/tasks/0040-verify-settings.yml | 60 ++++++++++++++++---
 .../tasks/0080-system-configurations.yml      |  3 +-
 .../preinstall/tasks/0090-etchosts.yml        |  9 ++-
 .../kubespray-defaults/defaults/main/main.yml | 60 ++++++++++---------
 roles/kubespray-defaults/tasks/main.yaml      | 32 ++++++++++
 roles/kubespray-defaults/tasks/no_proxy.yml   |  4 +-
 roles/kubespray-defaults/vars/main.yml        | 20 +++++++
 roles/network_plugin/calico/tasks/check.yml   | 44 +++++++++++++-
 roles/network_plugin/calico/tasks/install.yml | 21 +++++--
 .../calico/templates/calico-config.yml.j2     | 12 ++--
 .../calico/templates/calico-node.yml.j2       | 17 ++++--
 .../calico_defaults/defaults/main.yml         |  2 +-
 roles/network_plugin/cilium/defaults/main.yml |  4 +-
 .../network_plugin/flannel/defaults/main.yml  |  2 +-
 .../flannel/templates/cni-flannel.yml.j2      |  6 +-
 .../network_plugin/kube-ovn/defaults/main.yml | 19 +++++-
 .../kube-ovn/templates/cni-kube-ovn.yml.j2    | 18 +++---
 .../kube-ovn/templates/cni-ovn.yml.j2         |  6 +-
 .../kube-router/templates/kubeconfig.yml.j2   |  2 +-
 roles/network_plugin/weave/defaults/main.yml  |  2 +-
 .../post-recover/tasks/main.yml               |  4 +-
 .../remove-etcd-node/tasks/main.yml           |  9 ++-
 roles/reset/tasks/main.yml                    |  4 +-
 .../vagrant_ubuntu20-calico-dual-stack.yml    |  3 -
 ... => vagrant_ubuntu24-calico-dual-stack.rb} |  4 +-
 .../vagrant_ubuntu24-calico-dual-stack.yml    |  8 +++
 .../vagrant_ubuntu24-calico-ipv6only-stack.rb |  9 +++
 ...vagrant_ubuntu24-calico-ipv6only-stack.yml | 12 ++++
 tests/testcases/010_check-apiserver.yml       |  2 +-
 tests/testcases/030_check-network.yml         |  4 +-
 tests/testcases/040_check-network-adv.yml     |  4 +-
 64 files changed, 470 insertions(+), 208 deletions(-)
 delete mode 100644 tests/files/vagrant_ubuntu20-calico-dual-stack.yml
 rename tests/files/{vagrant_ubuntu20-calico-dual-stack.rb => vagrant_ubuntu24-calico-dual-stack.rb} (76%)
 create mode 100644 tests/files/vagrant_ubuntu24-calico-dual-stack.yml
 create mode 100644 tests/files/vagrant_ubuntu24-calico-ipv6only-stack.rb
 create mode 100644 tests/files/vagrant_ubuntu24-calico-ipv6only-stack.yml

diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
index 891011020ce..af2739676ee 100644
--- a/.gitlab-ci/vagrant.yml
+++ b/.gitlab-ci/vagrant.yml
@@ -36,11 +36,21 @@
     - .cache/pip
   policy: pull-push # TODO: change to "pull" when not on main
 
-vagrant_ubuntu20-calico-dual-stack:
+vagrant_ubuntu24-calico-dual-stack:
   stage: deploy-extended
   extends: .vagrant
-  when: manual
-# FIXME: this test if broken (perma-failing)
+  rules:
+    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
+      when: on_success
+  allow_failure: false
+
+vagrant_ubuntu24-calico-ipv6only-stack:
+  stage: deploy-extended
+  extends: .vagrant
+  rules:
+    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
+      when: on_success
+  allow_failure: false
 
 vagrant_ubuntu20-flannel:
   stage: deploy-part1
diff --git a/Vagrantfile b/Vagrantfile
index 580d75194eb..b7caebf6330 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -210,14 +210,20 @@ Vagrant.configure("2") do |config|
       end
       ip = "#{$subnet}.#{i+100}"
+      ip6 = "#{$subnet_ipv6}::#{i+100}"
       node.vm.network :private_network,
         :ip => ip,
         :libvirt__guest_ipv6 => 'yes',
-        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
+        :libvirt__ipv6_address => ip6,
         :libvirt__ipv6_prefix => "64",
         :libvirt__forward_mode => "none",
         :libvirt__dhcp_enabled => false
+      # libvirt__ipv6_address does not work as intended: the address is obtained with the desired prefix, but auto-generated (like fd3c:b398:698:756:5054:ff:fe48:c61e/64)
+      # add a default route so that ansible_default_ipv6 can be detected
+      # TODO: fix libvirt__ipv6 or use $subnet in shell
+      config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"
+
       # Disable swap for each vm
       node.vm.provision "shell", inline: "swapoff -a"
diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py
index c22eb9f41c8..9f6132711ed 100755
--- a/contrib/terraform/terraform.py
+++ b/contrib/terraform/terraform.py
@@ -273,6 +273,7 @@ def openstack_host(resource, module_name):
         'access_ip_v4': raw_attrs['access_ip_v4'],
         'access_ip_v6': raw_attrs['access_ip_v6'],
         'access_ip': raw_attrs['access_ip_v4'],
+        'access_ip6': raw_attrs['access_ip_v6'],
         'ip': raw_attrs['network.0.fixed_ip_v4'],
         'flavor': parse_dict(raw_attrs, 'flavor', sep='_'),
diff --git a/docs/ansible/vars.md b/docs/ansible/vars.md
index 0bda826f57b..5258baf1343 100644
--- a/docs/ansible/vars.md
+++ b/docs/ansible/vars.md
@@ -41,8 +41,12 @@ Some variables of note include:
 * *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
   and access_ip are undefined
 * *ip6* - IPv6 address to use for binding services. (host var)
-  If *enable_dual_stack_networks* is set to ``true`` and *ip6* is defined,
+  If *ipv6_stack* (which deprecates *enable_dual_stack_networks*) is set to ``true`` and *ip6* is defined,
   kubelet's ``--node-ip`` and node's ``InternalIP`` will be the combination of *ip* and *ip6*.
+  It is used in the same way for IPv6-only clusters.
+* *access_ip6* - like ``access_ip``, but for IPv6
+* *ansible_default_ipv6.address* - Not Kubespray-specific, but it is used if ip6
+  and access_ip6 are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
   address instead of localhost for kube_control_planes and kube_control_plane[0] for
   kube_nodes. See more details in the
@@ -52,6 +56,20 @@ Some variables of note include:
   `loadbalancer_apiserver`. See more details in the
   [HA guide](/docs/operations/ha-mode.md).
 
+## Special network variables
+
+These variables help avoid a large number of if/else constructs throughout the code associated with enabling the different network stacks.
+These variables are used in all templates.
+By default, only ipv4_stack is enabled, so IPv4 is given priority in dual-stack mode.
+Don't change these variables if you don't understand what you're doing.
+
+* *main_access_ip* - equal to ``access_ip`` when ipv4_stack is enabled (even in the dual-stack case),
+  and ``access_ip6`` for IPv6-only clusters
+* *main_ip* - equal to ``ip`` when ipv4_stack is enabled (even in the dual-stack case),
+  and ``ip6`` for IPv6-only clusters
+* *main_access_ips* - list of ``access_ip`` and ``access_ip6`` for dual stack, or a single-element list with the corresponding address for single stack
+* *main_ips* - list of ``ip`` and ``ip6`` for dual stack, or a single-element list with the corresponding address for single stack
+
 ## Cluster variables
 
 Kubernetes needs some parameters in order to get deployed. These are the
@@ -83,12 +101,18 @@ following default cluster parameters:
   (assertion not applicable to calico which doesn't use this as a hard limit, see
   [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).
 
-* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
-
 * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
 
+* *kube_service_subnets* - All service subnets separated by commas (default is a mix of ``kube_service_addresses`` and ``kube_service_addresses_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
+  for example ``10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
+  It is not recommended to change this variable directly.
+
 * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
 
+* *kube_pods_subnets* - All pod subnets separated by commas (default is a mix of ``kube_pods_subnet`` and ``kube_pods_subnet_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
+  for example ``10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
+  It is not recommended to change this variable directly.
+
 * *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube_nodes can be in cluster.
 * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
@@ -152,9 +176,14 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
 private addresses, make sure to pick another values for ``kube_service_addresses``
 and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
 
-## Enabling Dual Stack (IPV4 + IPV6) networking
+## Enabling Dual Stack (IPV4 + IPV6) or IPV6 only networking
 
-If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
+The IPv4 stack is enabled when *ipv4_stack* is set to ``true`` (the default).
+The IPv6 stack is enabled when *ipv6_stack* is set to ``true`` (it defaults to ``false``).
+This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
+Set both variables to ``true`` for Dual Stack mode.
+IPv4 has higher priority in Dual Stack mode (e.g. in the `main_ip` and `main_access_ip` variables).
+You can also build IPv6-only clusters by setting *ipv4_stack* to ``false``.
 
 ## DNS variables
 
diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
index 67ee26b5911..b3e56a6626b 100644
--- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
@@ -97,27 +97,24 @@ kube_pods_subnet: 10.233.64.0/18
 # - kubelet_max_pods: 110
 kube_network_node_prefix: 24
 
-# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
-enable_dual_stack_networks: false
-
 # Kubernetes internal network for IPv6 services, unused block of space.
-# This is only used if enable_dual_stack_networks is set to true
+# This is only used if ipv6_stack is set to true
 # This provides 4096 IPv6 IPs
 kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
 
 # Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
 # This network must not already be in your network infrastructure!
-# This is only used if enable_dual_stack_networks is set to true.
+# This is only used if ipv6_stack is set to true.
 # This provides room for 256 nodes with 254 pods per node.
 kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
 
 # IPv6 subnet size allocated to each for pods.
-# This is only used if enable_dual_stack_networks is set to true
+# This is only used if ipv6_stack is set to true
 # This provides room for 254 pods per node.
 kube_network_node_prefix_ipv6: 120
 
 # The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
+kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
 kube_apiserver_port: 6443 # (https)
 
 # Kube-proxy proxyMode configuration.
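To make the documentation above concrete, here is a minimal sketch of a dual-stack configuration; the host name and addresses are hypothetical, and the derived `main_*` values described afterwards simply restate the precedence rules from docs/ansible/vars.md (IPv4 wins while both stacks are enabled):

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml -- enable both stacks (dual stack)
ipv4_stack: true
ipv6_stack: true

# host_vars/node1.yml -- per-host addresses (illustrative values only)
ip: 10.10.0.11
access_ip: 192.0.2.11
ip6: fd85:ee78:d8a6:8607::11
access_ip6: 2001:db8::11
```

With this inventory, kubespray-defaults would resolve `main_ip` to `10.10.0.11` and `main_access_ip` to `192.0.2.11` (IPv4 has priority in dual stack), while `main_ips` becomes `['10.10.0.11', 'fd85:ee78:d8a6:8607::11']` and `main_access_ips` becomes `['192.0.2.11', '2001:db8::11']`; with `ipv4_stack: false` the IPv6 values would be chosen instead.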
@@ -215,8 +212,8 @@ resolvconf_mode: host_resolvconf
 # Deploy netchecker app to verify DNS resolve as an HTTP service
 deploy_netchecker: false
 # Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
-skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
+skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 
 ## Container runtime
diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml
index e21a08a57c7..245cf73d0d9 100644
--- a/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml
+++ b/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml
@@ -11,7 +11,7 @@ calico_cni_name: k8s-pod-network
 
 # Enables Internet connectivity from containers
 # nat_outgoing: true
-# nat_outgoing_ipv6: false
+# nat_outgoing_ipv6: true
 
 # Enables Calico CNI "host-local" IPAM plugin
 # calico_ipam_host_local: true
diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml
index 21a2842c3dd..87fcf501cc0 100644
--- a/roles/container-engine/containerd/defaults/main.yml
+++ b/roles/container-engine/containerd/defaults/main.yml
@@ -122,7 +122,7 @@ enable_cdi: false
 # For containerd tracing configuration please check out the official documentation:
 # https://github.com/containerd/containerd/blob/main/docs/tracing.md
 containerd_tracing_enabled: false
-containerd_tracing_endpoint: "0.0.0.0:4317"
+containerd_tracing_endpoint: "[::]:4317"
 containerd_tracing_protocol: "grpc"
 containerd_tracing_sampling_ratio: 1.0
 containerd_tracing_service_name: "containerd"
diff --git a/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 b/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2
index df88c7dabde..b53930519ed 100644
--- a/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2
+++ b/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2
@@ -7,7 +7,7 @@ Requires=cri-dockerd.socket
 
 [Service]
 Type=notify
-ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %}
+ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnets }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if ipv6_stack %}--ipv6-dual-stack=True{% endif %}
 ExecReload=/bin/kill -s HUP $MAINPID
 TimeoutSec=0
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index b1123f530e4..1d2205b8b35 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -24,7 +24,7 @@
 
 - name: Wait for etcd up
   uri:
-    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2379/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@@ -39,7 +39,7 @@
 
 - name: Wait for etcd-events up
   uri:
-    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2383/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
    client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 58383fa1fe9..25629fa4cc4 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -145,7 +145,7 @@
     ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
 
 - name: Configure | Check if member is in etcd cluster
-  shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
+  shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
   register: etcd_member_in_cluster
   ignore_errors: true # noqa ignore-errors
   changed_when: false
@@ -163,7 +163,7 @@
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
 - name: Configure | Check if member is in etcd-events cluster
-  shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
+  shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
   register: etcd_events_member_in_cluster
   ignore_errors: true # noqa ignore-errors
   changed_when: false
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 10dd1c6352d..6094202aa25 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -19,7 +19,7 @@
     etcd_events_peer_addresses: >-
       {% for host in groups['etcd'] -%}
       {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
-      {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2382,
+      {{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2382",
       {%- endif -%}
       {%- if loop.last -%}
       {{ etcd_member_name }}={{ etcd_events_peer_url }}
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index 7599d7d268b..7d54e44ed78 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -20,7 +20,7 @@
     etcd_peer_addresses: >-
       {% for host in groups['etcd'] -%}
       {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
-      {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2380,
+      {{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2380",
      {%- endif -%}
       {%- if loop.last -%}
       {{ etcd_member_name }}={{ etcd_peer_url }}
diff --git a/roles/etcd/templates/etcd-events.env.j2 b/roles/etcd/templates/etcd-events.env.j2
index 3abefd6f783..1e576f5617d 100644
--- a/roles/etcd/templates/etcd-events.env.j2
+++ b/roles/etcd/templates/etcd-events.env.j2
@@ -4,11 +4,11 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
 ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %}
 ETCD_METRICS={{ etcd_metrics }}
-ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2383,https://127.0.0.1:2383
+ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2383,https://127.0.0.1:2383
 ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
 ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
 ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd
-ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382
+ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2382
 ETCD_NAME={{ etcd_member_name }}-events
 ETCD_PROXY=off
 ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}
diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2
index ec217957b7f..3f7d39d09fc 100644
--- a/roles/etcd/templates/etcd.env.j2
+++ b/roles/etcd/templates/etcd.env.j2
@@ -8,13 +8,13 @@ ETCD_METRICS={{ etcd_metrics }}
 {% if etcd_listen_metrics_urls is defined %}
 ETCD_LISTEN_METRICS_URLS={{ etcd_listen_metrics_urls }}
 {% elif etcd_metrics_port is defined %}
-ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
+ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address | ansible.utils.ipwrap }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
 {% endif %}
-ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379
+ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2379,https://127.0.0.1:2379
 ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
 ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
 ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
-ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
+ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2380
 ETCD_NAME={{ etcd_member_name }}
 ETCD_PROXY=off
 ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2
index 6ac5dd41025..4186aaa5eff 100644
--- a/roles/etcd/templates/openssl.conf.j2
+++ b/roles/etcd/templates/openssl.conf.j2
@@ -42,9 +42,16 @@ DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% if hostvars[host]['access_ip'] is defined %}
 IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
 {% endif %}
-IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ increment(counter, 'ip') }}
+{% if hostvars[host]['access_ip6'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip6'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+{% if ipv6_stack %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip6'] | default(hostvars[host]['fallback_ip6']) }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['main_ip'] }}{{ increment(counter, 'ip') }}
 {% endfor %}
 {% for cert_alt_ip in etcd_cert_alt_ips %}
 IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
 {% endfor %}
-IP.{{ counter["ip"] }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1{{ increment(counter, 'ip') }}
+IP.{{ counter["ip"] }} = ::1
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 9202051981f..bb0786d1a48 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -71,7 +71,7 @@
     user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
     username: "kubernetes-admin-{{ cluster_name }}"
     context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
-    override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
+    override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + (external_apiserver_address | ansible.utils.ipwrap) + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
     override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}"
     override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}"
   when: kubeconfig_localhost
diff --git a/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml b/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml
index e61bcb7725c..98092f4316a 100644
--- a/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml
+++ b/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml
@@ -4,7 +4,7 @@ kube_kubeadm_scheduler_extra_args: {}
 
 # Associated interface must be reachable by the rest of the cluster, and by
 # CLI/web clients.
-kube_scheduler_bind_address: 0.0.0.0
+kube_scheduler_bind_address: "::"
 
 # ClientConnection options (e.g. Burst, QPS) except from kubeconfig.
 kube_scheduler_client_conn_extra_opts: {}
diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml
index dbc0f239618..1a201aee71c 100644
--- a/roles/kubernetes/control-plane/defaults/main/main.yml
+++ b/roles/kubernetes/control-plane/defaults/main/main.yml
@@ -6,7 +6,7 @@ upgrade_cluster_setup: false
 # listen on a specific address/interface.
 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
 # loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
-kube_apiserver_bind_address: 0.0.0.0
+kube_apiserver_bind_address: "::"
 
 # A port range to reserve for services with NodePort visibility.
 # Inclusive at both ends of the range.
@@ -29,7 +29,7 @@ kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
 
 # Associated interfaces must be reachable by the rest of the cluster, and by
 # CLI/web clients.
-kube_controller_manager_bind_address: 0.0.0.0
+kube_controller_manager_bind_address: "::"
 
 # Leader election lease durations and timeouts for controller-manager
 kube_controller_manager_leader_elect_lease_duration: 15s
@@ -242,7 +242,7 @@ kubeadm_upgrade_auto_cert_renewal: true
 
 ## Enable distributed tracing for kube-apiserver
 kube_apiserver_tracing: false
-kube_apiserver_tracing_endpoint: 0.0.0.0:4317
+kube_apiserver_tracing_endpoint: "[::]:4317"
 kube_apiserver_tracing_sampling_rate_per_million: 100
 
 # Enable kubeadm file discovery if anonymous access has been removed
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index ef554a238e4..3b0d5ac7963 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -78,7 +78,7 @@
 
 - name: Control plane | wait for kube-scheduler
   vars:
-    endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
+    endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '::' else 'localhost' }}"
   uri:
     url: https://{{ endpoint }}:10259/healthz
     validate_certs: false
@@ -92,7 +92,7 @@
 
 - name: Control plane | wait for kube-controller-manager
   vars:
-    endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
+    endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '::' else 'localhost' }}"
   uri:
     url: https://{{ endpoint }}:10257/healthz
     validate_certs: false
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index 245f4b2e777..4aa5e842438 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -4,7 +4,7 @@
     # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
-      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
+      {{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
       {%- else -%}
       {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
       {%- endif %}
@@ -43,8 +43,8 @@
 
 - name: Wait for k8s apiserver
   wait_for:
-    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
-    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
+    host: "{{ kubeadm_discovery_address | regex_replace('\\]?:\\d+$', '') | regex_replace('^\\[', '') }}"
+    port: "{{ kubeadm_discovery_address.split(':')[-1] }}"
     timeout: 180
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index ae7b7506fe5..4adca61516f 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -35,12 +35,13 @@
       - "{{ kube_apiserver_ip }}"
       - "localhost"
      - "127.0.0.1"
+      - "::1"
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
-    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
-    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
-    sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
+    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') | list | select('defined') | list }}"
+    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') | list | select('defined') | list }}"
+    sans_address: "{{ (groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'address']) | select('defined') | list) + (groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | select('defined') | list) }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index d8afd69dcf6..cc7987be4da 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -1,7 +1,7 @@
 ---
 - name: Kubeadm | Check api is up
   uri:
-    url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
+    url: "https://{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}/healthz"
     validate_certs: false
   when: ('kube_control_plane' in group_names)
   register: _result
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
index 067100185b0..349942044de 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
@@ -7,7 +7,7 @@ bootstrapTokens:
   ttl: "24h"
 {% endif %}
 localAPIEndpoint:
-  advertiseAddress: {{ kube_apiserver_address }}
+  advertiseAddress: "{{ kube_apiserver_address }}"
   bindPort: {{ kube_apiserver_port }}
 {% if kubeadm_certificate_key is defined %}
 certificateKey: {{ kubeadm_certificate_key }}
@@ -41,7 +41,7 @@ etcd:
   external:
     endpoints:
 {% for endpoint in etcd_access_addresses.split(',') %}
-    - {{ endpoint }}
+    - "{{ endpoint }}"
 {% endfor %}
     caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
     certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -94,9 +94,9 @@ dns:
   imageTag: {{ coredns_image_tag }}
 networking:
   dnsDomain: {{ dns_domain }}
-  serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+  serviceSubnet: "{{ kube_service_subnets }}"
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
-  podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+  podSubnet: "{{ kube_pods_subnets }}"
 {% endif %}
 {% if kubeadm_feature_gates %}
 featureGates:
@@ -106,9 +106,9 @@ featureGates:
 {% endif %}
 kubernetesVersion: {{ kube_version }}
 {% if kubeadm_config_api_fqdn is defined %}
-controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
 {% else %}
-controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
+controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
 {% endif %}
 certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
@@ -131,7 +131,7 @@ apiServer:
 {% else %}
     authorization-mode: {{ authorization_modes | join(',') }}
 {% endif %}
-    bind-address: {{ kube_apiserver_bind_address }}
+    bind-address: "{{ kube_apiserver_bind_address }}"
 {% if kube_apiserver_enable_admission_plugins | length > 0 %}
    enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
 {% endif %}
@@ -147,7 +147,7 @@ apiServer:
    etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
 {% endif %}
    service-node-port-range: {{ kube_apiserver_node_port_range }}
-    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+    service-cluster-ip-range: "{{ kube_service_subnets }}"
    kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
    profiling: "{{ kube_profiling }}"
    request-timeout: "{{ kube_apiserver_request_timeout }}"
@@ -294,7 +294,7 @@ apiServer:
 {% endif %}
   certSANs:
 {% for san in apiserver_sans %}
-  - "{{ san }}"
+  - {{ san }}
 {% endfor %}
   timeoutForControlPlane: 5m0s
 controllerManager:
@@ -302,22 +302,22 @@ controllerManager:
    node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
    node-monitor-period: {{ kube_controller_node_monitor_period }}
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
-    cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+    cluster-cidr: "{{ kube_pods_subnets }}"
 {% endif %}
-    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+    service-cluster-ip-range: "{{ kube_service_subnets }}"
 {% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
    allocate-node-cidrs: "false"
 {% else %}
-{% if enable_dual_stack_networks %}
+{% if ipv4_stack %}
    node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
+{% endif %}
+{% if ipv6_stack %}
    node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
-{% else %}
-    node-cidr-mask-size: "{{ kube_network_node_prefix }}"
 {% endif %}
 {% endif %}
    profiling: "{{ kube_profiling }}"
    terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
-    bind-address: {{ kube_controller_manager_bind_address }}
+    bind-address: "{{ kube_controller_manager_bind_address }}"
    leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }}
    leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }}
 {% if kube_controller_feature_gates or kube_feature_gates %}
@@ -350,7 +350,7 @@ controllerManager:
 {% endif %}
 scheduler:
   extraArgs:
-    bind-address: {{ kube_scheduler_bind_address }}
+    bind-address: "{{ kube_scheduler_bind_address }}"
    config: {{ kube_config_dir }}/kubescheduler-config.yaml
 {% if kube_scheduler_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
 {% endif %}
@@ -384,7 +384,7 @@ scheduler:
 ---
 apiVersion: kubeproxy.config.k8s.io/v1alpha1
 kind: KubeProxyConfiguration
-bindAddress: {{ kube_proxy_bind_address }}
+bindAddress: "{{ kube_proxy_bind_address }}"
 clientConnection:
   acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
   burst: {{ kube_proxy_client_burst }}
@@ -392,7 +392,7 @@ clientConnection:
   kubeconfig: {{ kube_proxy_client_kubeconfig }}
   qps: {{ kube_proxy_client_qps }}
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
-clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+clusterCIDR: "{{ kube_pods_subnets }}"
 {% endif %}
 configSyncPeriod: {{ kube_proxy_config_sync_period }}
 conntrack:
@@ -401,7 +401,7 @@ conntrack:
   tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
   tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
 enableProfiling: {{ kube_proxy_enable_profiling }}
-healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
+healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
 hostnameOverride: "{{ kube_override_hostname }}"
 iptables:
   masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -417,7 +417,7 @@ ipvs:
   tcpTimeout: {{ kube_proxy_tcp_timeout }}
   tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
   udpTimeout: {{ kube_proxy_udp_timeout }}
-metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
+metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
 mode: {{ kube_proxy_mode }}
 nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
 oomScoreAdj: {{ kube_proxy_oom_score_adj }}
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2
index 2646c4c0d96..dce73ad3bb0 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2
@@ -7,7 +7,7 @@ bootstrapTokens:
   ttl: "24h"
 {% endif %}
 localAPIEndpoint:
-  advertiseAddress: {{ kube_apiserver_address }}
+  advertiseAddress: "{{ kube_apiserver_address }}"
   bindPort: {{ kube_apiserver_port }}
 {% if kubeadm_certificate_key is defined %}
 certificateKey: {{ kubeadm_certificate_key }}
@@ -43,7 +43,7 @@ etcd:
   external:
     endpoints:
 {% for endpoint in etcd_access_addresses.split(',') %}
-    - {{ endpoint }}
+    - "{{ endpoint }}"
 {% endfor %}
     caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
     certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -106,9 +106,9 @@ dns:
   imageTag: {{ coredns_image_tag }}
 networking:
   dnsDomain: {{ dns_domain }}
-  serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+  serviceSubnet: "{{ kube_service_subnets }}"
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
-  podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+  podSubnet: "{{ kube_pods_subnets }}"
 {% endif %}
 {% if kubeadm_feature_gates %}
 featureGates:
@@ -118,9 +118,9 @@ featureGates:
 {% endif %}
 kubernetesVersion: {{ kube_version }}
 {% if kubeadm_config_api_fqdn is defined %}
-controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
 {% else %}
-controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
+controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
 {% endif %}
 certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
@@ -174,7 +174,7 @@ apiServer:
    - name: service-node-port-range
      value: "{{ kube_apiserver_node_port_range }}"
    - name: service-cluster-ip-range
-      value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+      value: "{{ kube_service_subnets }}"
    - name: kubelet-preferred-address-types
      value: "{{ kubelet_preferred_address_types }}"
    - name: profiling
@@ -351,7 +351,7 @@ apiServer:
 {% endif %}
   certSANs:
 {% for san in apiserver_sans %}
-  - "{{ san }}"
+  - {{ san }}
 {% endfor %}
 controllerManager:
   extraArgs:
@@ -361,22 +361,21 @@ controllerManager:
      value: "{{ kube_controller_node_monitor_period }}"
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
    - name: cluster-cidr
-      value: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+      value: "{{ kube_pods_subnets }}"
 {% endif %}
    - name: service-cluster-ip-range
-      value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
+      value: "{{ kube_service_subnets }}"
 {% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
    - name: allocate-node-cidrs
      value: "false"
 {% else %}
-{% if enable_dual_stack_networks %}
+{% if ipv4_stack %}
    - name: node-cidr-mask-size-ipv4
      value: "{{ kube_network_node_prefix }}"
+{% endif %}
+{% if ipv6_stack %}
    - name: node-cidr-mask-size-ipv6
      value: "{{ kube_network_node_prefix_ipv6 }}"
-{% else %}
-    - name: node-cidr-mask-size
-      value: "{{ kube_network_node_prefix }}"
 {% endif %}
 {% endif %}
    - name: profiling
@@ -480,7 +479,7 @@ scheduler:
 ---
 apiVersion: kubeproxy.config.k8s.io/v1alpha1
 kind: KubeProxyConfiguration
-bindAddress: {{ kube_proxy_bind_address }}
+bindAddress: "{{ kube_proxy_bind_address }}"
 clientConnection:
   acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
   burst: {{ kube_proxy_client_burst }}
@@ -488,7 +487,7 @@ clientConnection:
   kubeconfig: {{ kube_proxy_client_kubeconfig }}
   qps: {{ kube_proxy_client_qps }}
 {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
-clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
+clusterCIDR: "{{ kube_pods_subnets }}"
 {% endif %}
 configSyncPeriod: {{ kube_proxy_config_sync_period }}
 conntrack:
@@ -497,7 +496,7 @@ conntrack:
   tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
   tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
 enableProfiling: {{ kube_proxy_enable_profiling }}
-healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
+healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
 hostnameOverride: "{{ kube_override_hostname }}"
 iptables:
   masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -513,7 +512,7 @@ ipvs:
   tcpTimeout: {{ kube_proxy_tcp_timeout }}
   tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
   udpTimeout: {{ kube_proxy_udp_timeout }}
-metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
+metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
 mode: {{ kube_proxy_mode }}
 nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
 oomScoreAdj: {{ kube_proxy_oom_score_adj }}
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.yaml.j2
index d057256fc24..a8e7d320434 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.yaml.j2
@@ -9,7 +9,7 @@ discovery:
 {% if kubeadm_config_api_fqdn is defined %}
    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
 {% else %}
-    apiServerEndpoint: {{ kubeadm_discovery_address }}
+    apiServerEndpoint: "{{ kubeadm_discovery_address }}"
 {% endif %}
    token: {{ kubeadm_token }}
    unsafeSkipCAVerification: true
@@ -24,7 +24,7 @@ timeouts:
 {% endif %}
 controlPlane:
   localAPIEndpoint:
-    advertiseAddress: {{ kube_apiserver_address }}
+    advertiseAddress: "{{ kube_apiserver_address }}"
     bindPort: {{ kube_apiserver_port }}
   certificateKey: {{ kubeadm_certificate_key }}
 nodeRegistration:
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 6e562d8ce30..2ef7376a9cc 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -4,7 +4,7 @@
     # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
-      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
+      {{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
       {%- else -%}
       {{ kube_apiserver_endpoint | replace("https://", "") }}
       {%- endif %}
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
index 3dfe5d1fa9f..e2e450b3861 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
@@ -8,9 +8,9 @@ discovery:
 {% else %}
   bootstrapToken:
 {% if kubeadm_config_api_fqdn is defined %}
-    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+    apiServerEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
 {% else %}
-    apiServerEndpoint: {{ kubeadm_discovery_address }}
+    apiServerEndpoint: "{{ kubeadm_discovery_address }}"
 {% endif %}
    token: {{ kubeadm_token }}
 {% if ca_cert_content is defined %}
@@ -32,7 +32,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt
 {% if kubeadm_cert_controlplane is defined and kubeadm_cert_controlplane %}
 controlPlane:
   localAPIEndpoint:
-    advertiseAddress: {{ kube_apiserver_address }}
+    advertiseAddress: "{{ kube_apiserver_address }}"
     bindPort: {{ kube_apiserver_port }}
   certificateKey: {{ kubeadm_certificate_key }}
 {% endif %}
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index c51e00f0e46..7bd4a9c4ab4 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -1,9 +1,10 @@
 ---
 # advertised host IP for kubelet. This affects network plugin config. Take caution
-kubelet_address: "{{ ip | default(fallback_ip) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}"
+# add the IPv6 addresses manually for dual-stack mode, because main_ip prioritizes IPv4
+kubelet_address: "{{ main_ips | join(',') }}"
 
-# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces
-kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
+# bind address for kubelet. Set to :: to listen on all interfaces
+kubelet_bind_address: "{{ main_ip | default('::') }}"
 
 # resolv.conf to base dns config
 kube_resolv_conf: "/etc/resolv.conf"
@@ -27,11 +28,12 @@ kubelet_systemd_hardening: false
 kubelet_systemd_wants_dependencies: []
 
 # List of secure IPs for kubelet
+# don't forget the IPv6 addresses for dual stack (because "main_ip" prioritizes IPv4)
 kube_node_addresses: >-
   {%- for host in (groups['k8s_cluster'] | union(groups['etcd'])) -%}
-  {{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ ' ' if not loop.last else '' }}
+  {{ hostvars[host]['main_ips'] | join(' ') }}{{ ' ' if not loop.last else '' }}
   {%- endfor -%}
-kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} {{ kube_node_addresses }}"
+kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnets | regex_replace(',', ' ') }} {{ kube_node_addresses }}"
 
 # Reserve this space for kube resources
 # Whether to run kubelet and container-engine daemons in a dedicated cgroup. (Not required for resource reservations).
@@ -190,7 +192,7 @@ conntrack_modules:
 
 ## Enable distributed tracing for kubelet
 kubelet_tracing: false
-kubelet_tracing_endpoint: 0.0.0.0:4317
+kubelet_tracing_endpoint: "[::]:4317"
 kubelet_tracing_sampling_rate_per_million: 100
 
 # The maximum number of image pulls in parallel. Set it to a integer great than 1 to enable image pulling in parallel.
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index b8a5fcebf9b..3f45c594377 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -27,7 +27,7 @@
 - name: Install nginx-proxy
   import_tasks: loadbalancer/nginx-proxy.yml
   when:
-    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
   tags:
@@ -36,7 +36,7 @@
 - name: Install haproxy
   import_tasks: loadbalancer/haproxy.yml
   when:
-    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
   tags:
diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
index 0474bf9bb11..46e72c02534 100644
--- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
@@ -29,10 +29,10 @@ containerLogMaxSize: {{ kubelet_logfiles_max_size }}
 containerRuntimeEndpoint : {{ cri_socket }}
 maxPods: {{ kubelet_max_pods }}
 podPidsLimit: {{ kubelet_pod_pids_limit }}
-address: {{ kubelet_bind_address }}
+address: "{{ kubelet_bind_address }}"
 readOnlyPort: {{ kube_read_only_port }}
 healthzPort: {{ kubelet_healthz_port }}
-healthzBindAddress: {{ kubelet_healthz_bind_address }}
+healthzBindAddress: "{{ kubelet_healthz_bind_address }}"
 kubeletCgroups: {{ kubelet_kubelet_cgroups }}
 clusterDomain: {{ dns_domain }}
 {% if kubelet_protect_kernel_defaults | bool %}
@@ -130,7 +130,7 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
 {% endif %}
 {% if kubelet_tracing %}
 tracing:
-  endpoint: {{ kubelet_tracing_endpoint }}
+  endpoint: "{{ kubelet_tracing_endpoint }}"
   samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
 {% endif %}
 maxParallelImagePulls: {{ kubelet_max_parallel_image_pulls }}
diff --git a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
index 1cd8b411499..bd6514a15ba 100644
--- a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
+++ b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
@@ -22,7 +22,7 @@ defaults
 {% if loadbalancer_apiserver_healthcheck_port is defined -%}
 frontend healthz
   bind 0.0.0.0:{{ loadbalancer_apiserver_healthcheck_port }}
-  {% if enable_dual_stack_networks -%}
+  {% if ipv6_stack -%}
   bind :::{{ loadbalancer_apiserver_healthcheck_port }}
   {% endif -%}
   mode http
@@ -31,7 +31,7 @@ frontend healthz
 frontend kube_api_frontend
   bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
-  {% if enable_dual_stack_networks -%}
+  {% if ipv6_stack -%}
   bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
   {% endif -%}
   mode tcp
@@ -45,5 +45,5 @@ backend kube_api_backend
   option httpchk GET /healthz
   http-check expect status 200
   {% for host in groups['kube_control_plane'] -%}
-  server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }} check check-ssl verify none
+  server {{ host }} {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }} check check-ssl verify none
   {% endfor -%}
diff --git a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
index d6b5cce4ee7..c57f47bd140 100644
--- a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
+++ b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
@@ -14,13 +14,13 @@ stream {
   upstream kube_apiserver {
     least_conn;
     {% for host in groups['kube_control_plane'] -%}
-    server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }};
+    server {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }};
     {% endfor -%}
   }
 
   server {
     listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
-    {% if enable_dual_stack_networks -%}
+    {% if ipv6_stack -%}
     listen [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
     {% endif -%}
     proxy_pass kube_apiserver;
@@ -44,7 +44,7 @@ http {
   {% if loadbalancer_apiserver_healthcheck_port is defined -%}
   server {
     listen {{ loadbalancer_apiserver_healthcheck_port }};
-    {% if enable_dual_stack_networks -%}
+    {% if ipv6_stack -%}
     listen [::]:{{ loadbalancer_apiserver_healthcheck_port }};
     {% endif -%}
     location /healthz {
diff --git a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
index 4b8af602059..7451022875a 100644
--- a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
+++ b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
@@ -5,7 +5,7 @@ clusters:
 - name: local
   cluster:
     certificate-authority: {{ kube_cert_dir }}/ca.pem
-    server: {{ kube_apiserver_endpoint }}
+    server: "{{ kube_apiserver_endpoint }}"
 users:
 - name: kubelet
   user:
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index 4f6a741d12e..cd77120f5c1 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -1,7 +1,7 @@
 ---
 - name: Stop if any host not in '--limit' does not have a fact cache
   vars:
-    uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
+    uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv6', 'undefined') | selectattr('value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
    excluded_hosts: "{{ groups['k8s_cluster'] | difference(query('inventory_hostnames', ansible_limit)) }}"
   assert:
     that: uncached_hosts | intersect(excluded_hosts) == []
@@ -105,6 +105,7 @@
    - not ignore_assert_errors
    - ('k8s_cluster' in group_names)
    - kube_network_plugin not in ['calico', 'none']
+    - ipv4_stack | bool
 
 - name: Stop if ip var does not match local ips
   assert:
@@ -125,16 +126,16 @@
    {%- endif -%}
    state: present
   when:
-    - access_ip is defined
+    - main_access_ip is defined
    - not ignore_assert_errors
    - ping_access_ip
    - not is_fedora_coreos
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
 - name: Stop if access_ip is not pingable
-  command: ping -c1 {{ access_ip }}
+  command: ping -c1 {{ main_access_ip }}
   when:
-    - access_ip is defined
+    - main_access_ip is defined
    - not ignore_assert_errors
    - ping_access_ip
   changed_when: false
@@ -179,12 +180,19 @@
    - cloud-provider
    - facts
 
+- name: Warn if `enable_dual_stack_networks` is set
+  debug:
+    msg: "WARNING! => `enable_dual_stack_networks` is deprecated. Please switch to using `ipv4_stack` and `ipv6_stack`."
+  when:
+    - enable_dual_stack_networks is defined
+
 - name: "Check that kube_service_addresses is a network range"
   assert:
     that:
       - kube_service_addresses | ansible.utils.ipaddr('net')
     msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
   run_once: true
+  when: ipv4_stack | bool
 
 - name: "Check that kube_pods_subnet is a network range"
   assert:
@@ -192,6 +200,7 @@
       - kube_pods_subnet | ansible.utils.ipaddr('net')
     msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
   run_once: true
+  when: ipv4_stack | bool
 
 - name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
   assert:
@@ -199,13 +208,50 @@
       - kube_pods_subnet | ansible.utils.ipaddr(kube_service_addresses) | string == 'None'
     msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
   run_once: true
+  when: ipv4_stack | bool
 
-- name: "Check that IP range is enough for the nodes"
+- name: "Check that ipv4 IP range is enough for the nodes"
   assert:
     that:
       - 2 ** (kube_network_node_prefix - kube_pods_subnet | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
-    msg: "Not enough IPs are available for the desired node count."
-  when: kube_network_plugin != 'calico'
+    msg: "Not enough ipv4 IPs are available for the desired node count."
+ when: + - ipv4_stack | bool + - kube_network_plugin != 'calico' + run_once: true + +- name: "Check that kube_service_addresses_ipv6 is a network range" + assert: + that: + - kube_service_addresses_ipv6 | ansible.utils.ipaddr('net') + msg: "kube_service_addresses_ipv6 = '{{ kube_service_addresses_ipv6 }}' is not a valid network range" + run_once: true + when: ipv6_stack | bool + +- name: "Check that kube_pods_subnet_ipv6 is a network range" + assert: + that: + - kube_pods_subnet_ipv6 | ansible.utils.ipaddr('net') + msg: "kube_pods_subnet_ipv6 = '{{ kube_pods_subnet_ipv6 }}' is not a valid network range" + run_once: true + when: ipv6_stack | bool + +- name: "Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6" + assert: + that: + - kube_pods_subnet_ipv6 | ansible.utils.ipaddr(kube_service_addresses_ipv6) | string == 'None' + msg: "kube_pods_subnet_ipv6 cannot be the same network segment as kube_service_addresses_ipv6" + run_once: true + when: ipv6_stack | bool + +- name: "Check that ipv6 IP range is enough for the nodes" + assert: + that: + - 2 ** (kube_network_node_prefix_ipv6 - kube_pods_subnet_ipv6 | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length + msg: "Not enough ipv6 IPs are available for the desired node count." + when: + - ipv6_stack | bool + - kube_network_plugin != 'calico' run_once: true - name: Stop if unsupported options selected diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml index 4345fdd785c..2d58fcc0a24 100644 --- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -76,6 +76,7 @@ value: "1" state: present reload: true + when: ipv4_stack | bool - name: Enable ipv6 forwarding ansible.posix.sysctl: @@ -84,7 +85,7 @@ value: "1" state: present reload: true - when: enable_dual_stack_networks | bool + when: ipv6_stack | bool - name: Check if we need to set fs.may_detach_mounts stat: diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml index d549cdd85f5..2952a6313e2 100644 --- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -2,11 +2,10 @@ - name: Hosts | create hosts list from inventory set_fact: etc_hosts_inventory_block: |- - {% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%} - {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} - {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} - {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %} - + {% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique %} + {{ hostvars[item]['main_access_ip'] }} {{ hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }} + {% if ipv4_stack and ipv6_stack %} + {{ hostvars[item]['access_ip6'] | default(hostvars[item]['ip6'] | default(hostvars[item]['ansible_default_ipv6']['address'])) }} {{ 
hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }} {% endif %}
+      {% endfor %}
   delegate_to: localhost
diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml
index b832da74ab7..ad823458983 100644
--- a/roles/kubespray-defaults/defaults/main/main.yml
+++ b/roles/kubespray-defaults/defaults/main/main.yml
@@ -135,8 +135,8 @@ resolvconf_mode: host_resolvconf
 # Deploy netchecker app to verify DNS resolution as an HTTP service
 deploy_netchecker: false
 # Ip address of the kubernetes DNS service (called skydns for historical reasons)
-skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
-skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
+skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 docker_dns_search_domains:
 - 'default.svc.{{ dns_domain }}'
@@ -230,33 +230,39 @@ kube_pods_subnet: 10.233.64.0/18
 kube_network_node_prefix: 24

 # Configure Dual Stack networking (i.e. both IPv4 and IPv6)
-enable_dual_stack_networks: false
+# enable_dual_stack_networks: false  # deprecated, superseded by ipv4_stack / ipv6_stack
+
+# Configure IPv4 Stack networking
+ipv4_stack: true
+# Configure IPv6 Stack networking
+ipv6_stack: "{{ enable_dual_stack_networks | default(false) }}"

 # Kubernetes internal network for IPv6 services, unused block of space.
-# This is only used if enable_dual_stack_networks is set to true
+# This is only used if ipv6_stack is set to true
 # This provides 4096 IPv6 IPs
 kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

 # Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
 # This network must not already be in your network infrastructure!
-# This is only used if enable_dual_stack_networks is set to true.
+# This is only used if ipv6_stack is set to true.
 # This provides room for 256 nodes with 254 pods per node.
 kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

 # IPv6 subnet size allocated to each node for pods.
-# This is only used if enable_dual_stack_networks is set to true
+# This is only used if ipv6_stack is set to true
 # This provides room for 254 pods per node.
 kube_network_node_prefix_ipv6: 120
+
 # The virtual cluster IP, real host IPs and ports the API Server will be
 # listening on.
 # NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
 # access IP value (automatically evaluated below)
-kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
+kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
 # NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost
 # loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
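+# NOTE: "::" makes the apiserver listen on both IPv6 and IPv4 on dual-stack hosts
+# (assuming the kernel default net.ipv6.bindv6only=0); IPv4-only deployments can
+# still override this back to 0.0.0.0.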
-kube_apiserver_bind_address: 0.0.0.0
+kube_apiserver_bind_address: "::"

 # https
 kube_apiserver_port: 6443
@@ -608,9 +614,9 @@ ssl_ca_dirs: |-
 # Vars for pointing to kubernetes api endpoints
 kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
-kube_apiserver_address: "{{ ip | default(hostvars[inventory_hostname]['fallback_ip']) }}"
-kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
-first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip'])) }}"
+kube_apiserver_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
+kube_apiserver_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
+first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['main_access_ip'] }}"
 loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
 loadbalancer_apiserver_type: "nginx"
 # applied if only external loadbalancer_apiserver is defined, otherwise ignored
@@ -621,7 +627,7 @@ kube_apiserver_global_endpoint: |-
   {%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
   https://localhost:{{ kube_apiserver_port }}
   {%- else -%}
-  https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
+  https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
   {%- endif %}
 kube_apiserver_endpoint: |-
   {% if loadbalancer_apiserver is defined -%}
@@ -629,9 +635,9 @@ kube_apiserver_endpoint: |-
   {%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
   https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
   {%- elif 'kube_control_plane' in group_names -%}
-  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
+  https://{{ kube_apiserver_bind_address | regex_replace('^(0\.0\.0\.0|::)$', '127.0.0.1') | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
   {%- else -%}
-  https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
+  https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
   {%- endif %}
 kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
 kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
@@ -643,41 +649,41 @@ etcd_events_cluster_enabled: false
 etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"

 # Vars for pointing to etcd endpoints
-etcd_address: "{{ ip | default(fallback_ip) }}"
-etcd_access_address: "{{ access_ip | default(etcd_address) }}"
-etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
-etcd_peer_url: "https://{{ etcd_access_address }}:2380"
-etcd_client_url: "https://{{ etcd_access_address }}:2379"
-etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
-etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383"
+etcd_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
+etcd_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
+etcd_events_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
+etcd_peer_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2380"
+etcd_client_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2379"
+etcd_events_peer_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap
}}:2382" +etcd_events_client_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap }}:2383" etcd_access_addresses: |- {% for item in etcd_hosts -%} - https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2379{% if not loop.last %},{% endif %} + https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2379{% if not loop.last %},{% endif %} {%- endfor %} etcd_events_access_addresses_list: |- [ {% for item in etcd_hosts -%} - 'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2383'{% if not loop.last %},{% endif %} + 'https://{{ hostvars[item].main_access_ip | ansible.utils.ipwrap }}:2383'{% if not loop.last %},{% endif %} {%- endfor %} ] etcd_metrics_addresses: |- {% for item in etcd_hosts -%} - https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %} + https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %} {%- endfor %} etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}" etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}" # user should set etcd_member_name in inventory/mycluster/hosts.ini etcd_member_name: |- {% for host in groups['etcd'] %} - {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %} + {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %} {% endfor %} etcd_peer_addresses: |- {% for item in groups['etcd'] -%} - {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2380{% if not loop.last %},{% endif %} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2380{% if not loop.last %},{% endif %} {%- endfor %} etcd_events_peer_addresses: |- {% for item in groups['etcd'] -%} - {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2382{% if not loop.last %},{% endif %} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2382{% if not loop.last %},{% endif %} {%- endfor %} etcd_heartbeat_interval: "250" diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml index 1009f4950e3..b2f6a7c9a86 100644 --- a/roles/kubespray-defaults/tasks/main.yaml +++ b/roles/kubespray-defaults/tasks/main.yaml @@ -18,6 +18,38 @@ fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}" when: fallback_ip is not defined + - name: Gather ansible_default_ipv6 + setup: + gather_subset: '!all,network' + filter: "ansible_default_ipv6" + when: ansible_default_ipv6 is not defined + ignore_unreachable: true + - name: Set fallback_ip6 + set_fact: + fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}" + when: fallback_ip6 is not defined + + - name: Set main 
access IP (access_ip or access_ip6, chosen by the ipv4_stack/ipv6_stack options)
+    set_fact:
+      main_access_ip: >-
+        {%- if ipv4_stack -%}
+        {{ access_ip | default(ip | default(fallback_ip)) }}
+        {%- else -%}
+        {{ access_ip6 | default(ip6 | default(fallback_ip6)) }}
+        {%- endif -%}
+
+  - name: Set main IP (ip or ip6, chosen by the ipv4_stack/ipv6_stack options)
+    set_fact:
+      main_ip: "{{ (ip | default(fallback_ip)) if ipv4_stack else (ip6 | default(fallback_ip6)) }}"
+
+  - name: Set main access IPs (addresses of both families on dual-stack)
+    set_fact:
+      main_access_ips: "{{ [main_access_ip, access_ip6 | default(ip6 | default(fallback_ip6))] if (ipv4_stack and ipv6_stack) else [main_access_ip] }}"
+
+  - name: Set main IPs (addresses of both families on dual-stack)
+    set_fact:
+      main_ips: "{{ [main_ip, ip6 | default(fallback_ip6)] if (ipv4_stack and ipv6_stack) else [main_ip] }}"
+
   - name: Set no_proxy
     import_tasks: no_proxy.yml
     when:
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index 4aa85f7f232..56b9446d842 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -13,7 +13,7 @@
       {% set cluster_or_control_plane = 'k8s_cluster' %}
       {%- endif -%}
       {%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
-      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }},
+      {{ hostvars[item]['main_access_ip'] }},
       {%- if item != hostvars[item].get('ansible_hostname', '') -%}
       {{ hostvars[item]['ansible_hostname'] }},
       {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
@@ -23,7 +23,7 @@
       {%- if additional_no_proxy is defined -%}
       {{ additional_no_proxy }},
       {%- endif -%}
-      127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }}
+      127.0.0.1,localhost,{{ kube_service_subnets }},{{ kube_pods_subnets }},svc,svc.{{ dns_domain }}
   delegate_to: localhost
   connection: local
   delegate_facts: true
diff --git a/roles/kubespray-defaults/vars/main.yml b/roles/kubespray-defaults/vars/main.yml
index c79edf50b38..3b8be9465d8 100644
--- a/roles/kubespray-defaults/vars/main.yml
+++ b/roles/kubespray-defaults/vars/main.yml
@@ -7,3 +7,23 @@ kube_proxy_deployed: "{{ 'addon/kube-proxy' not in kubeadm_init_phases_skip }}"
 calico_min_version_required: "v3.19.4"

 containerd_min_version_required: "1.3.7"
+
+# mixed kube_service_addresses/kube_service_addresses_ipv6 for a variety of network stacks (dual-stack, IPv6-only, IPv4-only)
+kube_service_subnets: >-
+  {%- if ipv4_stack and ipv6_stack -%}
+  {{ kube_service_addresses }},{{ kube_service_addresses_ipv6 }}
+  {%- elif ipv4_stack -%}
+  {{ kube_service_addresses }}
+  {%- else -%}
+  {{ kube_service_addresses_ipv6 }}
+  {%- endif -%}
+
+# mixed kube_pods_subnet/kube_pods_subnet_ipv6 for a variety of network stacks (dual-stack, IPv6-only, IPv4-only)
+kube_pods_subnets: >-
+  {%- if ipv4_stack and ipv6_stack -%}
+  {{ kube_pods_subnet }},{{ kube_pods_subnet_ipv6 }}
+  {%- elif ipv4_stack -%}
+  {{ kube_pods_subnet }}
+  {%- else -%}
+  {{ kube_pods_subnet_ipv6 }}
+  {%- endif -%}
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index aef34bb2cdf..0991d5551fb 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -146,12 +146,16 @@
   check_mode: false
   register: calico
   run_once: true
+  when: ipv4_stack | bool
   delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Set
calico_pool_conf"
  set_fact:
    calico_pool_conf: '{{ calico.stdout | from_json }}'
-  when: calico.rc == 0 and calico.stdout
+  when:
+    - ipv4_stack | bool
+    - calico is defined
+    - calico.rc == 0 and calico.stdout
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -164,10 +168,45 @@
      - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
    msg: "Your inventory doesn't match the current cluster configuration"
  when:
+    - ipv4_stack | bool
    - calico_pool_conf is defined
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

+- name: "Get Calico {{ calico_pool_name }}-ipv6 configuration"
+  command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }}-ipv6 -o json"
+  failed_when: false
+  changed_when: false
+  check_mode: false
+  register: calico_ipv6
+  run_once: true
+  when: ipv6_stack | bool
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Set calico_pool_ipv6_conf"
+  set_fact:
+    calico_pool_ipv6_conf: '{{ calico_ipv6.stdout | from_json }}'
+  when:
+    - ipv6_stack | bool
+    - calico_ipv6 is defined
+    - calico_ipv6.rc == 0 and calico_ipv6.stdout
+  run_once: true
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check if ipv6 inventory matches the current cluster configuration"
+  assert:
+    that:
+      - calico_pool_ipv6_conf.spec.blockSize | int == calico_pool_blocksize_ipv6 | int
+      - calico_pool_ipv6_conf.spec.cidr == (calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6))
+      - not calico_pool_ipv6_conf.spec.ipipMode is defined or calico_pool_ipv6_conf.spec.ipipMode == calico_ipip_mode_ipv6
+      - not calico_pool_ipv6_conf.spec.vxlanMode is defined or calico_pool_ipv6_conf.spec.vxlanMode == calico_vxlan_mode_ipv6
+    msg: "Your ipv6 inventory doesn't match the current cluster configuration"
+  when:
+    - ipv6_stack | bool
+    - calico_pool_ipv6_conf is defined
+  run_once: true
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
 - name: "Check kdd calico_datastore if calico_apiserver_enabled"
   assert:
     that: calico_datastore == "kdd"
@@ -191,7 +230,6 @@
    that:
      - "calico_ipip_mode_ipv6 in ['Never']"
    msg: "Calico doesn't support ipip tunneling for the IPv6"
-  when:
-    - enable_dual_stack_networks
+  when: ipv6_stack | bool
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 2d80b110cdf..70cdc1d004b 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -84,6 +84,7 @@
   changed_when: false
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
+    - ipv4_stack | bool

 - name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
   assert:
@@ -91,8 +92,9 @@
     msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
-    - 'calico_conf.stdout == "0"'
+    - ipv4_stack | bool
     - calico_pool_cidr is defined
+    - 'calico_conf.stdout == "0"'

 - name: Calico | Check if calico IPv6 network pool has already been configured
   # noqa risky-shell-pipe - grep will exit 1 if no match found
@@ -107,7 +109,7 @@
   changed_when: false
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
-    - enable_dual_stack_networks
+    - ipv6_stack

 - name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
   assert:
@@ -115,9 +117,9 @@
     msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
   when:
     - inventory_hostname ==
groups['kube_control_plane'][0]
+    - ipv6_stack | bool
     - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
     - calico_pool_cidr_ipv6 is defined
-    - enable_dual_stack_networks

 - name: Calico | kdd specific configuration
   when:
@@ -206,6 +208,7 @@
 - name: Calico | Configure Calico IP Pool
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
+    - ipv4_stack | bool
   block:
     - name: Calico | Get existing calico network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
@@ -256,7 +259,7 @@
 - name: Calico | Configure Calico IPv6 Pool
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
-    - enable_dual_stack_networks | bool
+    - ipv6_stack | bool
   block:
     - name: Calico | Get existing calico ipv6 network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
@@ -350,7 +353,15 @@
       {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
       "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} ,
       {% if calico_advertise_cluster_ips | default(false) %}
-      "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
+      "serviceClusterIPs":
+      {%- if ipv4_stack and ipv6_stack -%}
+      [{"cidr": "{{ kube_service_addresses }}"}, {"cidr": "{{ kube_service_addresses_ipv6 }}"}],
+      {%- elif ipv6_stack -%}
+      [{"cidr": "{{ kube_service_addresses_ipv6 }}"}],
+      {%- else -%}
+      [{"cidr": "{{ kube_service_addresses }}"}],
+      {%- endif -%}
+      {% endif %}
       {% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
       "serviceExternalIPs": {{ _service_external_ips | default([]) }}
       }
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index d949af1ec6b..1e87917ea71 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -53,13 +53,15 @@ data:
             "type": "host-local",
             "subnet": "usePodCidr"
           },
-          {% else %}
+          {% else %}
           "ipam": {
             "type": "calico-ipam",
-            {% if enable_dual_stack_networks %}
-            "assign_ipv6": "true",
-            {% endif %}
-            "assign_ipv4": "true"
+            {% if ipv4_stack %}
+            "assign_ipv4": "true"{{ ',' if ipv6_stack }}
+            {% endif %}
+            {% if ipv6_stack %}
+            "assign_ipv6": "true"
+            {% endif %}
           },
           {% endif %}
           {% if calico_allow_ip_forwarding %}
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index cce777280ba..d5b509bbafe 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -265,7 +265,7 @@ spec:
         - name: CALICO_IPV4POOL_VXLAN
           value: "Never"
         - name: FELIX_IPV6SUPPORT
-          value: "{{ enable_dual_stack_networks | default(false) }}"
+          value: "{{ ipv6_stack | default(false) }}"
         # Set Felix logging to "info"
         - name: FELIX_LOGSEVERITYSCREEN
           value: "{{ calico_loglevel }}"
@@ -308,9 +308,18 @@ spec:
         - name: IP_AUTODETECTION_METHOD
           value: "can-reach=$(NODEIP)"
{% endif %}
+{% if ipv4_stack %}
         - name: IP
           value: "autodetect"
-{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
+{% else %}
+        - name: IP
+          value: "none"
+{% endif %}
+{% if ipv6_stack %}
+        - name: IP6
+          value: "autodetect"
+{% endif %}
+{% if calico_ip6_auto_method is defined and ipv6_stack %}
        - name:
IP6_AUTODETECTION_METHOD value: "{{ calico_ip6_auto_method }}" {% endif %} @@ -318,10 +327,6 @@ spec: - name: FELIX_MTUIFACEPATTERN value: "{{ calico_felix_mtu_iface_pattern }}" {% endif %} -{% if enable_dual_stack_networks %} - - name: IP6 - value: autodetect -{% endif %} {% if calico_use_default_route_src_ipaddr | default(false) %} - name: FELIX_DEVICEROUTESOURCEADDRESS valueFrom: diff --git a/roles/network_plugin/calico_defaults/defaults/main.yml b/roles/network_plugin/calico_defaults/defaults/main.yml index a9567e8866c..da899546b25 100644 --- a/roles/network_plugin/calico_defaults/defaults/main.yml +++ b/roles/network_plugin/calico_defaults/defaults/main.yml @@ -22,7 +22,7 @@ calico_pool_blocksize: 26 # Calico doesn't support ipip tunneling for the IPv6. calico_ipip_mode_ipv6: Never -calico_vxlan_mode_ipv6: Never +calico_vxlan_mode_ipv6: Always # add default ipv6 ippool blockSize calico_pool_blocksize_ipv6: 122 diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index 9de2d331c87..535d576bdd7 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -4,8 +4,8 @@ cilium_min_version_required: "1.10" cilium_debug: false cilium_mtu: "" -cilium_enable_ipv4: true -cilium_enable_ipv6: false +cilium_enable_ipv4: "{{ ipv4_stack }}" +cilium_enable_ipv6: "{{ ipv6_stack }}" # Enable l2 announcement from cilium to replace Metallb Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/ cilium_l2announcements: false diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index 6bdbaf039b0..16ada70030f 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -2,7 +2,7 @@ # Flannel public IP # The address that flannel should advertise as how to access the system # Disabled until https://github.com/coreos/flannel/issues/712 is fixed -# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ip)) }}" +# flannel_public_ip: "{{ main_access_ip }}" ## interface that should be used for flannel operations ## This is actually an inventory cluster-level item diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index 9c36d01ba5d..da4cfcde5b0 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -30,12 +30,14 @@ data: } net-conf.json: | { +{% if ipv4_stack %} "Network": "{{ kube_pods_subnet }}", "EnableIPv4": true, -{% if enable_dual_stack_networks %} +{% endif %} +{% if ipv6_stack %} "EnableIPv6": true, "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", -{% endif %} +{% endif %} "Backend": { "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, "VNI": {{ flannel_vxlan_vni }}, diff --git a/roles/network_plugin/kube-ovn/defaults/main.yml b/roles/network_plugin/kube-ovn/defaults/main.yml index a06cba0b04b..4262a775b27 100644 --- a/roles/network_plugin/kube-ovn/defaults/main.yml +++ b/roles/network_plugin/kube-ovn/defaults/main.yml @@ -33,7 +33,7 @@ kube_ovn_central_replics: "{{ kube_ovn_central_hosts | length }}" kube_ovn_controller_replics: "{{ kube_ovn_central_hosts | length }}" kube_ovn_central_ips: |- {% for item in kube_ovn_central_hosts -%} - {{ hostvars[item]['ip'] | default(hostvars[item]['fallback_ip']) }}{% if not loop.last %},{% endif %} + {{ hostvars[item]['main_ip'] }}{% if not loop.last 
%},{% endif %} {%- endfor %} kube_ovn_ic_enable: false @@ -62,6 +62,15 @@ kube_ovn_traffic_mirror: false kube_ovn_external_address: 8.8.8.8 kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_address_merged: >- + {%- if ipv4_stack and ipv6_stack -%} + {{ kube_ovn_external_address }},{{ kube_ovn_external_address_ipv6 }} + {%- elif ipv4_stack -%} + {{ kube_ovn_external_address }} + {%- else -%} + {{ kube_ovn_external_address_ipv6 }} + {%- endif -%} + kube_ovn_external_dns: alauda.cn # kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 @@ -74,6 +83,14 @@ kube_ovn_u2o_interconnection: false # kube_ovn_default_exclude_ips: 10.16.0.1 kube_ovn_node_switch_cidr: 100.64.0.0/16 kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 +kube_ovn_node_switch_cidr_merged: >- + {%- if ipv4_stack and ipv6_stack -%} + {{ kube_ovn_node_switch_cidr }},{{ kube_ovn_node_switch_cidr_ipv6 }} + {%- elif ipv4_stack -%} + {{ kube_ovn_node_switch_cidr }} + {%- else -%} + {{ kube_ovn_node_switch_cidr_ipv6 }} + {%- endif -%} ## vlan config, set default interface name and vlan id # kube_ovn_default_interface_name: eth0 diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 index f4acdedac10..b0fad2ff550 100644 --- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 +++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -240,14 +240,14 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} args: - /kube-ovn/start-controller.sh - - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }} + - --default-cidr={{ kube_pods_subnets }} - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }} - --default-gateway-check={{ kube_ovn_default_gateway_check | string }} - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }} - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }} - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }} - - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} + - --node-switch-cidr={{ kube_ovn_node_switch_cidr_merged }} + - --service-cluster-ip-range={{ kube_service_subnets }} - --network-type={{ kube_ovn_network_type }} - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} - --default-vlan-id={{ kube_ovn_default_vlan_id }} @@ -403,7 +403,7 @@ spec: args: - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} - --encap-checksum={{ kube_ovn_encap_checksum | lower }} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} + - --service-cluster-ip-range={{ kube_service_subnets }} - --iface={{ kube_ovn_iface | default('') }} - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} - --network-type={{ kube_ovn_network_type }} @@ -588,7 +588,7 @@ spec: command: - /kube-ovn/kube-ovn-pinger args: - - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }} + - --external-address={{ kube_ovn_external_address_merged 
}} - --external-dns={{ kube_ovn_external_dns }} - --logtostderr=false - --alsologtostderr=true @@ -837,7 +837,7 @@ spec: - name: metrics port: 10661 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -852,7 +852,7 @@ metadata: labels: app: kube-ovn-pinger spec: -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -869,7 +869,7 @@ metadata: labels: app: kube-ovn-controller spec: -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -886,7 +886,7 @@ metadata: labels: app: kube-ovn-cni spec: -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: diff --git a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 index 453ac60722d..09f0b291ae2 100644 --- a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 +++ b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 @@ -260,7 +260,7 @@ spec: port: 6641 targetPort: 6641 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -280,7 +280,7 @@ spec: port: 6642 targetPort: 6642 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -300,7 +300,7 @@ spec: port: 6643 targetPort: 6643 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if ipv6_stack %} ipFamilyPolicy: PreferDualStack {% endif %} selector: diff --git a/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 b/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 index 42fd3170693..470885111d9 100644 --- a/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 +++ b/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 @@ -1,6 +1,6 @@ apiVersion: v1 kind: Config -clusterCIDR: {{ kube_pods_subnet }} +clusterCIDR: {{ kube_pods_subnets }} clusters: - name: cluster cluster: diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml index 47469ae4c83..337d8e79982 100644 --- a/roles/network_plugin/weave/defaults/main.yml +++ b/roles/network_plugin/weave/defaults/main.yml @@ -18,7 +18,7 @@ weave_hairpin_mode: true # The range of IP addresses used by Weave Net and the subnet they are placed in # (CIDR format; default 10.32.0.0/12) -weave_ipalloc_range: "{{ kube_pods_subnet }}" +weave_ipalloc_range: "{{ kube_pods_subnets }}" # Set to 0 to disable Network Policy Controller (default is on) weave_expect_npc: "{{ enable_network_policy }}" diff --git a/roles/recover_control_plane/post-recover/tasks/main.yml b/roles/recover_control_plane/post-recover/tasks/main.yml index a62f9127e9b..5401d7d0647 100644 --- a/roles/recover_control_plane/post-recover/tasks/main.yml +++ b/roles/recover_control_plane/post-recover/tasks/main.yml @@ -6,10 +6,10 @@ etcd_servers: >- {% for host in groups['etcd'] -%} {% if not loop.last -%} - https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379, + https://{{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:2379, {%- endif -%} {%- if loop.last -%} - https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379 + https://{{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:2379 
{%- endif -%}
    {%- endfor -%}
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index 4b845ff6c58..d4efed01c75 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -7,8 +7,6 @@
   changed_when: false
   when:
     - groups['kube_control_plane'] | length > 0
-    - ip is not defined
-    - access_ip is not defined
   delegate_to: "{{ groups['kube_control_plane'] | first }}"

 - name: Remove etcd member from cluster
@@ -29,7 +27,12 @@
     - facts

 - name: Remove member from cluster
   vars:
-    node_ip: "{{ ip if ip is defined else (access_ip if access_ip is defined else (k8s_node_ips.stdout | from_json)[0]) }}"
+    node_ip: >-
+      {%- if not ipv4_stack -%}
+      {{ (ip6 if ip6 is defined else (access_ip6 if access_ip6 is defined else (k8s_node_ips.stdout | from_json)[0])) | ansible.utils.ipwrap }}
+      {%- else -%}
+      {{ (ip if ip is defined else (access_ip if access_ip is defined else (k8s_node_ips.stdout | from_json)[0])) | ansible.utils.ipwrap }}
+      {%- endif -%}
   command:
     argv:
       - "{{ bin_dir }}/etcdctl"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 5d1b91c7b47..18e9622578f 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -189,7 +189,7 @@
     - nat
     - mangle
     - raw
-  when: flush_iptables | bool
+  when: flush_iptables | bool and ipv4_stack
   tags:
     - iptables

@@ -203,7 +203,7 @@
     - nat
     - mangle
     - raw
-  when: flush_iptables | bool and enable_dual_stack_networks
+  when: flush_iptables | bool and ipv6_stack
   tags:
     - ip6tables

diff --git a/tests/files/vagrant_ubuntu20-calico-dual-stack.yml b/tests/files/vagrant_ubuntu20-calico-dual-stack.yml
deleted file mode 100644
index 3a45bdc8f2a..00000000000
--- a/tests/files/vagrant_ubuntu20-calico-dual-stack.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Kubespray settings
-enable_dual_stack_networks: true
diff --git a/tests/files/vagrant_ubuntu20-calico-dual-stack.rb b/tests/files/vagrant_ubuntu24-calico-dual-stack.rb
similarity index 76%
rename from tests/files/vagrant_ubuntu20-calico-dual-stack.rb
rename to tests/files/vagrant_ubuntu24-calico-dual-stack.rb
index f7d7765ebea..51f945161ed 100644
--- a/tests/files/vagrant_ubuntu20-calico-dual-stack.rb
+++ b/tests/files/vagrant_ubuntu24-calico-dual-stack.rb
@@ -1,4 +1,6 @@
-# For CI we are not worried about data persistence across reboot
+$os = "ubuntu2404"
+
+$vm_cpus = 2
 $libvirt_volume_cache = "unsafe"

 # Checking for box update can trigger API rate limiting
diff --git a/tests/files/vagrant_ubuntu24-calico-dual-stack.yml b/tests/files/vagrant_ubuntu24-calico-dual-stack.yml
new file mode 100644
index 00000000000..7cdbbe6c3e5
--- /dev/null
+++ b/tests/files/vagrant_ubuntu24-calico-dual-stack.yml
@@ -0,0 +1,8 @@
+---
+# Instance settings
+cloud_image: ubuntu-2404
+mode: default
+
+# Kubespray settings
+ipv4_stack: true
+ipv6_stack: true
diff --git a/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.rb b/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.rb
new file mode 100644
index 00000000000..51f945161ed
--- /dev/null
+++ b/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.rb
@@ -0,0 +1,9 @@
+$os = "ubuntu2404"
+
+$vm_cpus = 2
+$libvirt_volume_cache = "unsafe"
+
+# Checking for box update can trigger API rate limiting
+# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
+$box_check_update = false
+$network_plugin = "calico"
diff --git a/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.yml b/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.yml
new file mode
100644
index 00000000000..74d93cf6386
--- /dev/null
+++ b/tests/files/vagrant_ubuntu24-calico-ipv6only-stack.yml
@@ -0,0 +1,12 @@
+---
+# Instance settings
+cloud_image: ubuntu-2404
+mode: default
+
+# Kubespray settings
+ipv4_stack: false
+ipv6_stack: true
+kube_network_plugin: calico
+etcd_deployment_type: kubeadm
+kube_proxy_mode: iptables
+enable_nodelocaldns: false
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 5714ccd6833..415aa18208e 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -5,7 +5,7 @@
   tasks:
     - name: Check the API servers are responding
       uri:
-        url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port | default(6443) }}/version"
+        url: "https://{{ (access_ip if (ipv4_stack | default(true)) else access_ip6) | default(ansible_default_ipv4.address if (ipv4_stack | default(true)) else ansible_default_ipv6.address) | ansible.utils.ipwrap }}:{{ kube_apiserver_port | default(6443) }}/version"
         validate_certs: false
         status_code: 200
       register: apiserver_response
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 29d15c02060..1ee69164e4f 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -7,7 +7,7 @@
     # TODO: source those from kubespray-defaults instead.
     # Needs kubespray-defaults to be decoupled from no-proxy stuff
     bin_dir: "/usr/local/bin"
-    kube_pods_subnet: 10.233.64.0/18
+    kube_pods_subnet: "{{ 'fd85:ee78:d8a6:8607::1:0000/112' if not (ipv4_stack | default(true)) else '10.233.64.0/18' }}"

   tasks:
@@ -115,7 +115,7 @@
           | length == 2

     - name: Curl between pods is working
-      command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP }}:8080"
+      command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP | ansible.utils.ipwrap }}:8080"
       with_nested:
         - "{{ pods }}"
         - "{{ pods }}"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 946de80595a..e036fcba610 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -51,7 +51,7 @@
       block:
         - name: Get netchecker agents
           uri:
-            url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
+            url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/agents/"
             return_content: true
             headers:
               Accept: application/json
@@ -64,7 +64,7 @@
         - name: Check netchecker status
           uri:
-            url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
+            url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/connectivity_check"
             return_content: true
             headers:
               Accept: application/json
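+            # ansible.utils.ipwrap adds the square brackets IPv6 URL literals require
+            # and returns IPv4 addresses unchanged, so the same template serves both stacks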