diff --git a/.gitignore b/.gitignore
index 421314e..4957b28 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,5 @@
 # Ignore any `build` directory
 build/
+
+# Ignore kubeadm credentials
+credentials
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000..dda9dfc
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+library = ./library/:./kubespray/library/
+roles_path = ./roles:./kubespray/roles
diff --git a/ansible/inventory/group_vars/k8s_cluster/addons.yml b/ansible/inventory/group_vars/k8s_cluster/addons.yml
new file mode 100644
index 0000000..e41aea6
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/addons.yml
@@ -0,0 +1,260 @@
+---
+# Kubernetes dashboard
+# RBAC required. See docs/getting-started.md for access details.
+# dashboard_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Registry deployment
+registry_enabled: false
+# registry_namespace: kube-system
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
+
+# Metrics Server deployment
+metrics_server_enabled: false
+# metrics_server_container_port: 10250
+# metrics_server_kubelet_insecure_tls: true
+# metrics_server_metric_resolution: 15s
+# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
+# metrics_server_host_network: false
+# metrics_server_replicas: 1
+
+# Rancher Local Path Provisioner
+local_path_provisioner_enabled: false
+# local_path_provisioner_namespace: "local-path-storage"
+# local_path_provisioner_storage_class: "local-path"
+# local_path_provisioner_reclaim_policy: Delete
+# local_path_provisioner_claim_root: /opt/local-path-provisioner/
+# local_path_provisioner_debug: false
+# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
+# local_path_provisioner_image_tag: "v0.0.24"
+# local_path_provisioner_helper_image_repo: "busybox"
+# local_path_provisioner_helper_image_tag: "latest"
+
+# Local volume provisioner deployment
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: kube-system
+# local_volume_provisioner_nodelabels:
+#   - kubernetes.io/hostname
+#   - topology.kubernetes.io/region
+#   - topology.kubernetes.io/zone
+# local_volume_provisioner_storage_classes:
+#   local-storage:
+#     host_dir: /mnt/disks
+#     mount_dir: /mnt/disks
+#     volume_mode: Filesystem
+#     fs_type: ext4
+#   fast-disks:
+#     host_dir: /mnt/fast-disks
+#     mount_dir: /mnt/fast-disks
+#     block_cleaner_command:
+#       - "/scripts/shred.sh"
+#       - "2"
+#     volume_mode: Filesystem
+#     fs_type: ext4
+# local_volume_provisioner_tolerations:
+#   - effect: NoSchedule
+#     operator: Exists
+
+# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
+# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
+# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
+# csi_snapshot_controller_enabled: false
+# csi snapshot namespace
+# snapshot_controller_namespace: kube-system
+
+# CephFS provisioner deployment
+cephfs_provisioner_enabled: false
+# cephfs_provisioner_namespace: "cephfs-provisioner"
+# cephfs_provisioner_cluster: ceph
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# cephfs_provisioner_admin_id: admin
+# cephfs_provisioner_secret: secret
+# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
+
+# RBD provisioner deployment
+rbd_provisioner_enabled: false
+# rbd_provisioner_namespace: rbd-provisioner
+# rbd_provisioner_replicas: 2
+# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# rbd_provisioner_pool: kube
+# rbd_provisioner_admin_id: admin
+# rbd_provisioner_secret_name: ceph-secret-admin
+# rbd_provisioner_secret: ceph-key-admin
+# rbd_provisioner_user_id: kube
+# rbd_provisioner_user_secret_name: ceph-secret-user
+# rbd_provisioner_user_secret: ceph-key-user
+# rbd_provisioner_user_secret_namespace: rbd-provisioner
+# rbd_provisioner_fs_type: ext4
+# rbd_provisioner_image_format: "2"
+# rbd_provisioner_image_features: layering
+# rbd_provisioner_storage_class: rbd
+# rbd_provisioner_reclaim_policy: Delete
+
+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+ingress_publish_status_address: ""
+# ingress_nginx_nodeselector:
+#   kubernetes.io/os: "linux"
+# ingress_nginx_tolerations:
+#   - key: "node-role.kubernetes.io/control-plane"
+#     operator: "Equal"
+#     value: ""
+#     effect: "NoSchedule"
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "TLSv1.2 TLSv1.3"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/coredns:53"
+# ingress_nginx_extra_args:
+#   - --default-ssl-certificate=default/foo-tls
+# ingress_nginx_termination_grace_period_seconds: 300
+# ingress_nginx_class: nginx
+# ingress_nginx_without_class: true
+# ingress_nginx_default: false
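+
+# A minimal enablement sketch (an assumption for illustration, not part of
+# the upstream sample): running the controller on the host network with a
+# pinned IngressClass would combine the knobs documented above, e.g.:
+# ingress_nginx_enabled: true
+# ingress_nginx_host_network: true
+# ingress_nginx_class: nginx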
+
+# ALB ingress controller deployment
+ingress_alb_enabled: false
+# alb_ingress_aws_region: "us-east-1"
+# alb_ingress_restrict_scheme: "false"
+# Enables logging on all outbound requests sent to the AWS API.
+# If logging is desired, set to true.
+# alb_ingress_aws_debug: "false"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+# cert_manager_tolerations:
+#   - key: node-role.kubernetes.io/control-plane
+#     effect: NoSchedule
+# cert_manager_affinity:
+#   nodeAffinity:
+#     preferredDuringSchedulingIgnoredDuringExecution:
+#       - weight: 100
+#         preference:
+#           matchExpressions:
+#             - key: node-role.kubernetes.io/control-plane
+#               operator: In
+#               values:
+#                 - ""
+# cert_manager_nodeselector:
+#   kubernetes.io/os: "linux"
+
+# cert_manager_trusted_internal_ca: |
+#   -----BEGIN CERTIFICATE-----
+#   [REPLACE with your CA certificate]
+#   -----END CERTIFICATE-----
+# cert_manager_leader_election_namespace: kube-system
+
+# cert_manager_dns_policy: "ClusterFirst"
+# cert_manager_dns_config:
+#   nameservers:
+#     - "1.1.1.1"
+#     - "8.8.8.8"
+
+# cert_manager_controller_extra_args:
+#   - "--dns01-recursive-nameservers-only=true"
+#   - "--dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53"
+
+# MetalLB deployment
+metallb_enabled: false
+metallb_speaker_enabled: "{{ metallb_enabled }}"
+# metallb_version: v0.13.9
+# metallb_protocol: "layer2"
+# metallb_port: "7472"
+# metallb_memberlist_port: "7946"
+# metallb_config:
+#   speaker:
+#     nodeselector:
+#       kubernetes.io/os: "linux"
+#     tolerations:
+#       - key: "node-role.kubernetes.io/control-plane"
+#         operator: "Equal"
+#         value: ""
+#         effect: "NoSchedule"
+#   controller:
+#     nodeselector:
+#       kubernetes.io/os: "linux"
+#     tolerations:
+#       - key: "node-role.kubernetes.io/control-plane"
+#         operator: "Equal"
+#         value: ""
+#         effect: "NoSchedule"
+#   address_pools:
+#     primary:
+#       ip_range:
+#         - 10.5.0.0/16
+#       auto_assign: true
+#     pool1:
+#       ip_range:
+#         - 10.6.0.0/16
+#       auto_assign: true
+#     pool2:
+#       ip_range:
+#         - 10.10.0.0/16
+#       auto_assign: true
+#   layer2:
+#     - primary
+#   layer3:
+#     defaults:
+#       peer_port: 179
+#       hold_time: 120s
+#     communities:
+#       vpn-only: "1234:1"
+#       NO_ADVERTISE: "65535:65282"
+#     metallb_peers:
+#       peer1:
+#         peer_address: 10.6.0.1
+#         peer_asn: 64512
+#         my_asn: 4200000000
+#         communities:
+#           - vpn-only
+#         address_pool:
+#           - pool1
+#       peer2:
+#         peer_address: 10.10.0.1
+#         peer_asn: 64513
+#         my_asn: 4200000000
+#         communities:
+#           - NO_ADVERTISE
+#         address_pool:
+#           - pool2
+
+argocd_enabled: false
+# argocd_version: v2.8.4
+# argocd_namespace: argocd
+# Default password:
+#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
+#   ---
+#   The initial password is autogenerated and stored in `argocd-initial-admin-secret` in the argocd namespace defined above.
+#   Using the argocd CLI the generated password can automatically be fetched from the current kubectl context with the command:
+#   argocd admin initial-password -n argocd
+#   ---
+# Use the following var to set admin password
+# argocd_admin_password: "password"
+
+# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"
+
+# Kube VIP
+kube_vip_enabled: false
+# kube_vip_arp_enabled: true
+# kube_vip_controlplane_enabled: true
+# kube_vip_address: 192.168.56.120
+# loadbalancer_apiserver:
+#   address: "{{ kube_vip_address }}"
+#   port: 6443
+# kube_vip_interface: eth0
+# kube_vip_services_enabled: false
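+
+# A worked sketch (assumption; the VIP below is illustrative, pick a free
+# address on the node network): a layer-2 control-plane VIP combines the
+# flags above, e.g.:
+# kube_vip_enabled: true
+# kube_vip_arp_enabled: true
+# kube_vip_interface: eth0
+# kube_vip_address: 192.168.122.200
+# loadbalancer_apiserver:
+#   address: "{{ kube_vip_address }}"
+#   port: 6443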
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-cluster.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-cluster.yml
new file mode 100644
index 0000000..454ba30
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-cluster.yml
@@ -0,0 +1,373 @@
+---
+# Kubernetes configuration dirs and system namespace.
+# These are where all the additional config files go that Kubernetes
+# normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing these values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+kube_api_anonymous_auth: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.28.6
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the user that owns the cluster installation.
+kube_owner: kube
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Directory where credentials will be stored
+credentials_dir: "{{ inventory_dir }}/credentials"
+
+## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
+# kube_oidc_auth: false
+# kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
+# kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: 'oidc:'
+# kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: 'oidc:'
+
+## Variables to control webhook authn/authz
+# kube_webhook_token_auth: false
+# kube_webhook_token_auth_url: https://...
+# kube_webhook_token_auth_url_skip_tls_verify: false
+
+## For webhook authorization, authorization_modes must include Webhook
+# kube_webhook_authorization: false
+# kube_webhook_authorization_url: https://...
+# kube_webhook_authorization_url_skip_tls_verify: false
+
+# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
+kube_network_plugin_multus: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node for pod IP address allocation. Note that the number of pods per node is
+# also limited by the kubelet_max_pods variable which defaults to 110.
+#
+# Example:
+# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
+#   - kube_pods_subnet: 10.233.64.0/18
+#   - kube_network_node_prefix: 24
+#   - kubelet_max_pods: 110
+#
+# Example:
+# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
+#   - kube_pods_subnet: 10.233.64.0/18
+#   - kube_network_node_prefix: 25
+#   - kubelet_max_pods: 110
+kube_network_node_prefix: 24
+
+# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
+enable_dual_stack_networks: false
+
+# Kubernetes internal network for IPv6 services, unused block of space.
+# This is only used if enable_dual_stack_networks is set to true
+# This provides 4096 IPv6 IPs
+kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
+
+# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
+# This network must not already be in your network infrastructure!
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 256 nodes with 254 pods per node.
+kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# IPv6 subnet size allocated to each node for pods.
+# This is only used if enable_dual_stack_networks is set to true
+# This provides room for 254 pods per node.
+kube_network_node_prefix_ipv6: 120
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
+kube_apiserver_port: 6443  # (https)
+
+# Kube-proxy proxyMode configuration.
+# Can be ipvs, iptables
+kube_proxy_mode: ipvs
+
+# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
+# must be set to true for MetalLB, kube-vip(ARP enabled) to work
+kube_proxy_strict_arp: false
+
+# A string slice of values which specify the addresses to use for NodePorts.
+# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
+# The default empty string slice ([]) means to use all local addresses.
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config
+kube_proxy_nodeport_addresses: >-
+  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
+  [{{ kube_proxy_nodeport_addresses_cidr }}]
+  {%- else -%}
+  []
+  {%- endif -%}
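+
+# Worked example (illustrative): with kube_proxy_nodeport_addresses_cidr set
+# to 192.168.122.0/24 the template above renders [192.168.122.0/24]; left
+# unset it renders the empty slice [], i.e. NodePorts listen on all local
+# addresses.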
+
+# If non-empty, will use this string as identification instead of the actual hostname
+# kube_override_hostname: >-
+#   {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
+#   {%- else -%}
+#   {{ inventory_hostname }}
+#   {%- endif -%}
+
+## Encrypting Secret Data at Rest
+kube_encrypt_secret_data: false
+
+# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
+# kubelet_shutdown_grace_period must be greater than kubelet_shutdown_grace_period_critical_pods to allow
+# non-critical pods to also terminate gracefully
+# kubelet_shutdown_grace_period: 60s
+# kubelet_shutdown_grace_period_critical_pods: 20s
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# dns_timeout: 2
+# dns_attempts: 2
+# Custom search domains to be added in addition to the default cluster search domains
+# searchdomains:
+#   - svc.{{ cluster_name }}
+#   - default.svc.{{ cluster_name }}
+# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
+# remove_default_searchdomains: false
+# Can be coredns, coredns_dual, manual or none
+dns_mode: coredns
+# Set manual server if using a custom cluster DNS server
+# manual_dns_server: 10.x.x.x
+# Enable nodelocal dns cache
+enable_nodelocaldns: true
+enable_nodelocaldns_secondary: false
+nodelocaldns_ip: 169.254.25.10
+nodelocaldns_health_port: 9254
+nodelocaldns_second_health_port: 9256
+nodelocaldns_bind_metrics_host_ip: false
+nodelocaldns_secondary_skew_seconds: 5
+# nodelocaldns_external_zones:
+#   - zones:
+#       - example.com
+#       - example.io:1053
+#     nameservers:
+#       - 1.1.1.1
+#       - 2.2.2.2
+#     cache: 5
+#   - zones:
+#       - https://mycompany.local:4453
+#     nameservers:
+#       - 192.168.0.53
+#     cache: 0
+#   - zones:
+#       - mydomain.tld
+#     nameservers:
+#       - 10.233.0.3
+#     cache: 5
+#     rewrite:
+#       - name website.tld website.namespace.svc.cluster.local
+# Enable k8s_external plugin for CoreDNS
+enable_coredns_k8s_external: false
+coredns_k8s_external_zone: k8s_external.local
+# Enable endpoint_pod_names option for kubernetes plugin
+enable_coredns_k8s_endpoint_pod_names: false
+# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
+# dns_upstream_forward_extra_opts:
+#   policy: sequential
+# Apply extra options to coredns kubernetes plugin
+# coredns_kubernetes_extra_opts:
+#   - 'fallthrough example.local'
+# Forward extra domains to the coredns kubernetes plugin
+# coredns_kubernetes_extra_domains: ''
+
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: host_resolvconf
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+## Container runtime
+## docker for docker, crio for cri-o and containerd for containerd.
+## Default: containerd
+container_manager: containerd
+
+# Additional container runtimes
+kata_containers_enabled: false
+
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# audit log for kubernetes
+kubernetes_audit: false
+
+# define kubelet config dir for dynamic kubelet
+# kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
+# kubeconfig_localhost: false
+# Use ansible_host as external api ip when copying over kubeconfig.
+# kubeconfig_localhost_ansible_host: false
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
+# kubectl_localhost: false
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Set runtime and kubelet cgroups when using systemd as cgroup driver (default)
+# kubelet_runtime_cgroups: "/{{ kube_service_cgroups }}/{{ container_manager }}.service"
+# kubelet_kubelet_cgroups: "/{{ kube_service_cgroups }}/kubelet.service"
+
+## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver
+# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
+# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
+
+# Optionally reserve this space for kube daemons.
+# kube_reserved: false
+## Uncomment to override default values
+## The following two items need to be set when kube_reserved is true
+# kube_reserved_cgroups_for_service_slice: kube.slice
+# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
+# kube_memory_reserved: 256Mi
+# kube_cpu_reserved: 100m
+# kube_ephemeral_storage_reserved: 2Gi
+# kube_pid_reserved: "1000"
+# Reservation for master hosts
+# kube_master_memory_reserved: 512Mi
+# kube_master_cpu_reserved: 200m
+# kube_master_ephemeral_storage_reserved: 2Gi
+# kube_master_pid_reserved: "1000"
+
+## Optionally reserve resources for OS system daemons.
+# system_reserved: true
+## Uncomment to override default values
+## The following two items need to be set when system_reserved is true
+# system_reserved_cgroups_for_service_slice: system.slice
+# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
+# system_memory_reserved: 512Mi
+# system_cpu_reserved: 500m
+# system_ephemeral_storage_reserved: 2Gi
+## Reservation for master hosts
+# system_master_memory_reserved: 256Mi
+# system_master_cpu_reserved: 250m
+# system_master_ephemeral_storage_reserved: 2Gi
+
+## Eviction Thresholds to avoid system OOMs
+# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
+# eviction_hard: {}
+# eviction_hard_control_plane: {}
+
+# An alternative flexvolume plugin directory
+# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful for example to setup a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
+
+## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
+## See https://github.com/kubernetes-sigs/kubespray/issues/2141
+## Set this variable to true to get rid of this issue
+volume_cross_zone_attachment: false
+## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
+## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
+persistent_volumes_enabled: false
+
+## Container Engine Acceleration
+## Enable container acceleration feature, for example use gpu acceleration in containers
+# nvidia_accelerator_enabled: true
+## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
+## Labels and taints won't be set to nodes if they are not in the array.
+# nvidia_gpu_nodes:
+#   - kube-gpu-001
+# nvidia_driver_version: "384.111"
+## flavor can be tesla or gtx
+# nvidia_gpu_flavor: gtx
+## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
+# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
+# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
+## NVIDIA GPU device plugin image.
+# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
+
+## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
+# tls_min_version: ""
+
+## Support tls cipher suites.
+# tls_cipher_suites: {}
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
+#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
+#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
+#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
+#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+#   - TLS_RSA_WITH_AES_128_CBC_SHA
+#   - TLS_RSA_WITH_AES_128_CBC_SHA256
+#   - TLS_RSA_WITH_AES_128_GCM_SHA256
+#   - TLS_RSA_WITH_AES_256_CBC_SHA
+#   - TLS_RSA_WITH_AES_256_GCM_SHA384
+#   - TLS_RSA_WITH_RC4_128_SHA
+
+## Amount of time to retain events. (default 1h0m0s)
+event_ttl_duration: "1h0m0s"
+
+## Automatically renew K8S control plane certificates on first Monday of each month
+auto_renew_certificates: false
+# First Monday of each month
+# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
+
+# kubeadm patches path
+kubeadm_patches:
+  enabled: false
+  source_dir: "{{ inventory_dir }}/patches"
+  dest_dir: "{{ kube_config_dir }}/patches"
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-calico.yml
new file mode 100644
index 0000000..cc0499d
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-calico.yml
@@ -0,0 +1,131 @@
+---
+# see roles/network_plugin/calico/defaults/main.yml
+
+# the default value of name
+calico_cni_name: k8s-pod-network
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+# peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+
+# Enables Calico CNI "host-local" IPAM plugin
+# calico_ipam_host_local: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# add default ippool blockSize (defaults kube_network_node_prefix)
+calico_pool_blocksize: 26
+
+# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
+# calico_pool_cidr: 1.2.3.4/5
+
+# add default ippool CIDR to CNI config
+# calico_cni_pool: true
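+
+# Illustrative sketch (assumption): an explicit pool carved from
+# kube_pods_subnet with /26 blocks per node would look like
+# calico_pool_cidr: 10.233.64.0/18
+# calico_pool_blocksize: 26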
+
+# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
+# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# Add default IPV6 IPPool CIDR to CNI config
+# calico_cni_pool_ipv6: true
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# If doing peering with node-assigned asn where the global AS number does not match your nodes, you want this
+# to be true. All other cases, false.
+# calico_no_global_as_num: false
+
+# You can set MTU value here. If left undefined or empty, it will
+# not be specified in calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Configure the MTU to use for workload interfaces and tunnels.
+#   - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
+#   - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
+#   - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
+#   - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
+# calico_veth_mtu: 1440
+
+# Advertise Cluster IPs
+# calico_advertise_cluster_ips: true
+
+# Advertise Service External IPs
+# calico_advertise_service_external_ips:
+#   - x.x.x.x/24
+#   - y.y.y.y/32
+
+# Advertise Service LoadBalancer IPs
+# calico_advertise_service_loadbalancer_ips:
+#   - x.x.x.x/24
+#   - y.y.y.y/16
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "kdd"
+
+# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
+# calico_iptables_backend: "Auto"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Generate TLS certs for secure typha<->calico-node communication
+# typha_secure: false
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+# typha_replicas: 1
+
+# Set max typha connections
+# typha_max_connections_lower_limit: 300
+
+# Set calico network backend: "bird", "vxlan" or "none"
+# bird enables BGP routing, required for ipip and no encapsulation modes
+# calico_network_backend: vxlan
+
+# IP in IP and VXLAN are mutually exclusive modes.
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_ipip_mode: 'Never'
+
+# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_vxlan_mode: 'Always'
+
+# set VXLAN port and VNI
+# calico_vxlan_vni: 4096
+# calico_vxlan_port: 4789
+
+# Enable eBPF mode
+# calico_bpf_enabled: false
+
+# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/reference/node/configuration
+# calico_ip_auto_method: "interface=eth.*"
+# calico_ip6_auto_method: "interface=eth.*"
+
+# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host's interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration
+# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
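+
+# Worked example (illustrative): with a 1500-byte NIC MTU and the vxlan
+# backend suggested above, the arithmetic above gives
+# calico_veth_mtu: 1450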
+
+# Choose the iptables insert mode for Calico: "Insert" or "Append".
+# calico_felix_chaininsertmode: Insert
+
+# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
+# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
+# calico_use_default_route_src_ipaddr: false
+
+# Enable calico traffic encryption with wireguard
+# calico_wireguard_enabled: false
+
+# Under certain situations liveness and readiness probes may need tuning
+# calico_node_livenessprobe_timeout: 10
+# calico_node_readinessprobe_timeout: 10
+
+# Calico apiserver (only with kdd)
+# calico_apiserver_enabled: false
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
new file mode 100644
index 0000000..a170484
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
@@ -0,0 +1,264 @@
+---
+# cilium_version: "v1.12.1"
+
+# Log-level
+# cilium_debug: false
+
+# cilium_mtu: ""
+# cilium_enable_ipv4: true
+# cilium_enable_ipv6: false
+
+# Cilium agent health port
+# cilium_agent_health_port: "9879"
+
+# Identity allocation mode selects how identities are shared between cilium
+# nodes by setting how they are stored. The options are "crd" or "kvstore".
+# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+#   These can be queried with:
+#     `kubectl get ciliumid`
+# - "kvstore" stores identities in an etcd kvstore.
+# - In order to support External Workloads, "crd" is required
+#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
+# - KVStore operations are only required when cilium-operator is running with any of the below options:
+#   - --synchronize-k8s-services
+#   - --synchronize-k8s-nodes
+#   - --identity-allocation-mode=kvstore
+#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
+# cilium_identity_allocation_mode: kvstore
+
+# Etcd SSL dirs
+# cilium_cert_dir: /etc/cilium/certs
+# kube_etcd_cacert_file: ca.pem
+# kube_etcd_cert_file: cert.pem
+# kube_etcd_key_file: cert-key.pem
+
+# Limits for apps
+# cilium_memory_limit: 500M
+# cilium_cpu_limit: 500m
+# cilium_memory_requests: 64M
+# cilium_cpu_requests: 100m
+
+# Overlay Network Mode
+# cilium_tunnel_mode: vxlan
+# Optional features
+# cilium_enable_prometheus: false
+# Enable if you want to make use of hostPort mappings
+# cilium_enable_portmap: false
+# Monitor aggregation level (none/low/medium/maximum)
+# cilium_monitor_aggregation: medium
+# The monitor aggregation flags determine which TCP flags, upon the
+# first observation, cause monitor notifications to be generated.
+#
+# Only effective when monitor aggregation is set to "medium" or higher.
+# cilium_monitor_aggregation_flags: "all"
+# Kube Proxy Replacement mode (strict/partial)
+# cilium_kube_proxy_replacement: partial
+
+# If upgrading from Cilium < 1.5, you may want to override some of these options
+# to prevent service disruptions. See also:
+# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+# cilium_preallocate_bpf_maps: false
+
+# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
+# cilium_tofqdns_enable_poller: false
+
+# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
+# cilium_enable_legacy_services: false
+
+# Unique ID of the cluster. Must be unique across all connected clusters and
+# in the range of 1 to 255. Only relevant when building a mesh of clusters.
+# This value is not defined by default
+# cilium_cluster_id:
+
+# Deploy cilium even if kube_network_plugin is not cilium.
+# This enables deploying cilium alongside another CNI to replace kube-proxy.
+# cilium_deploy_additionally: false
+
+# Auto direct node routes can be used to advertise pod routes in your cluster
+# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
+# This works only if you have L2 connectivity between all your nodes.
+# You will also have to specify the variable `cilium_native_routing_cidr` to
+# make this work. Please refer to the cilium documentation for more
+# information about this kind of setup.
+# cilium_auto_direct_node_routes: false
+
+# Allows to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+# cilium_native_routing_cidr: ""
+
+# Allows to explicitly specify the IPv6 CIDR for native routing.
+# cilium_native_routing_cidr_ipv6: ""
+
+# Enable transparent network encryption.
+# cilium_encryption_enabled: false
+
+# Encryption method. Can be either ipsec or wireguard.
+# Only effective when `cilium_encryption_enabled` is set to true.
+# cilium_encryption_type: "ipsec"
+
+# Enable encryption for pure node to node traffic.
+# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
+# cilium_ipsec_node_encryption: false
+
+# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
+# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
+# it will fall back on the wireguard-go user-space implementation of WireGuard.
+# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
+# cilium_wireguard_userspace_fallback: false
+
+# IP Masquerade Agent
+# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
+# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
+# cilium_ip_masq_agent_enable: false
+
+### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
+# cilium_non_masquerade_cidrs:
+#   - 10.0.0.0/8
+#   - 172.16.0.0/12
+#   - 192.168.0.0/16
+#   - 100.64.0.0/10
+#   - 192.0.0.0/24
+#   - 192.0.2.0/24
+#   - 192.88.99.0/24
+#   - 198.18.0.0/15
+#   - 198.51.100.0/24
+#   - 203.0.113.0/24
+#   - 240.0.0.0/4
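+
+# Sketch (assumption): enabling the masquerade agent with a trimmed CIDR
+# list could look like
+# cilium_ip_masq_agent_enable: true
+# cilium_non_masquerade_cidrs:
+#   - 10.0.0.0/8
+#   - 192.168.0.0/16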
+### Indicates whether to masquerade traffic to the link local prefix.
+### If masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
+# cilium_masq_link_local: false
+### A time interval at which the agent attempts to reload config from disk
+# cilium_ip_masq_resync_interval: 60s
+
+# Hubble
+### Enable Hubble without install
+# cilium_enable_hubble: false
+### Enable Hubble Metrics
+# cilium_enable_hubble_metrics: false
+### if cilium_enable_hubble_metrics: true
+# cilium_hubble_metrics: {}
+#   - dns
+#   - drop
+#   - tcp
+#   - flow
+#   - icmp
+#   - http
+### Enable Hubble install
+# cilium_hubble_install: false
+### Enable auto generate certs if cilium_hubble_install: true
+# cilium_hubble_tls_generate: false
+
+# IP address management mode for v1.9+.
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
+# cilium_ipam_mode: kubernetes
+
+# Extra arguments for the Cilium agent
+# cilium_agent_custom_args: []
+
+# For adding and mounting extra volumes to the cilium agent
+# cilium_agent_extra_volumes: []
+# cilium_agent_extra_volume_mounts: []
+
+# cilium_agent_extra_env_vars: []
+
+# cilium_operator_replicas: 2
+
+# The address on which the cilium operator binds the health check API
+# cilium_operator_api_serve_addr: "127.0.0.1:9234"
+
+## A dictionary of extra config variables to add to cilium-config, formatted like:
+##   cilium_config_extra_vars:
+##     var1: "value1"
+##     var2: "value2"
+# cilium_config_extra_vars: {}
+
+# For adding and mounting extra volumes to the cilium operator
+# cilium_operator_extra_volumes: []
+# cilium_operator_extra_volume_mounts: []
+
+# Extra arguments for the Cilium Operator
+# cilium_operator_custom_args: []
+
+# Name of the cluster. Only relevant when building a mesh of clusters.
+# cilium_cluster_name: default
+
+# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
+# Available for Cilium v1.10 and up.
+# cilium_cni_exclusive: true
+
+# Configure the log file for CNI logging with retention policy of 7 days.
+# Disable CNI file logging by setting this field to empty explicitly.
+# Available for Cilium v1.12 and up.
+# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
+
+# -- Configure cgroup related configuration
+# -- Enable auto mount of cgroup2 filesystem.
+# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
+# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
+# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
+# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the
+# volume will be mounted inside the cilium agent pod at the same path.
+# Available for Cilium v1.11 and up
+# cilium_cgroup_auto_mount: true
+# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
+# cilium_cgroup_host_root: "/run/cilium/cgroupv2"
+
+# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+# cilium_bpf_map_dynamic_size_ratio: "0.0"
+
+# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+# cilium_enable_ipv4_masquerade: true
+# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+# cilium_enable_ipv6_masquerade: true
+
+# -- Enable native IP masquerade support in eBPF
+# cilium_enable_bpf_masquerade: false
+
+# -- Configure whether direct routing mode should route traffic via
+# host stack (true) or directly and more efficiently out of BPF (false) if
+# the kernel supports it. The latter has the implication that it will also
+# bypass netfilter in the host namespace.
+# cilium_enable_host_legacy_routing: true
+
+# -- Enable use of the remote node identity.
+# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+# cilium_enable_remote_node_identity: true
+
+# -- Enable the use of well-known identities.
+# cilium_enable_well_known_identities: false
+
+# cilium_enable_bpf_clock_probe: true
+
+# -- Whether to enable CNP status updates.
+# cilium_disable_cnp_status_updates: true
+
+# A list of extra rules variables to add to clusterrole for cilium operator, formatted like:
+#   cilium_clusterrole_rules_operator_extra_vars:
+#     - apiGroups:
+#         - '""'
+#       resources:
+#         - pods
+#       verbs:
+#         - delete
+#     - apiGroups:
+#         - '""'
+#       resources:
+#         - nodes
+#       verbs:
+#         - list
+#         - watch
+#       resourceNames:
+#         - toto
+# cilium_clusterrole_rules_operator_extra_vars: []
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-custom-cni.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-custom-cni.yml
new file mode 100644
index 0000000..67b0481
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-custom-cni.yml
@@ -0,0 +1,51 @@
+---
+# custom_cni network plugin configuration
+# There are two deployment options to choose from, select one
+
+## OPTION 1 - Static manifest files
+## With this option, the referred manifest file will be deployed
+## as if the `kubectl apply -f` method was used with it.
+#
+## List of Kubernetes resource manifest files
+## See tests/files/custom_cni/README.md for example
+# custom_cni_manifests: []
+
+## OPTION 1 EXAMPLE - Cilium static manifests in Kubespray tree
+# custom_cni_manifests:
+#   - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"
+
+## OPTION 2 - Helm chart application
+## This allows the CNI backend to be deployed to the Kubespray cluster
+## as a common Helm application.
+#
+## Helm release name - how the local instance of deployed chart will be named
+# custom_cni_chart_release_name: ""
+#
+## Kubernetes namespace to deploy into
+# custom_cni_chart_namespace: "kube-system"
+#
+## Helm repository name - how the local record of Helm repository will be named
+# custom_cni_chart_repository_name: ""
+#
+## Helm repository URL
+# custom_cni_chart_repository_url: ""
+#
+## Helm chart reference - path to the chart in the repository
+# custom_cni_chart_ref: ""
+#
+## Helm chart version
+# custom_cni_chart_version: ""
+#
+## Custom Helm values to be used for deployment
+# custom_cni_chart_values: {}
+
+## OPTION 2 EXAMPLE - Cilium deployed from official public Helm chart
+# custom_cni_chart_namespace: kube-system
+# custom_cni_chart_release_name: cilium
+# custom_cni_chart_repository_name: cilium
+# custom_cni_chart_repository_url: https://helm.cilium.io
+# custom_cni_chart_ref: cilium/cilium
+# custom_cni_chart_version: 1.14.3
+# custom_cni_chart_values:
+#   cluster:
+#     name: "cilium-demo"
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
new file mode 100644
index 0000000..64d20a8
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
@@ -0,0 +1,18 @@
+# see roles/network_plugin/flannel/defaults/main.yml
+
+## interface that should be used for flannel operations
+## This is actually an inventory cluster-level item
+# flannel_interface:
+
+## Select interface that should be used for flannel operations by regexp on Name or IP
+## This is actually an inventory cluster-level item
+## example: select interface with ip from net 10.0.0.0/23
+## single quote and escape backslashes
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
+# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
+# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
+# flannel_backend_type: "vxlan"
+# flannel_vxlan_vni: 1
+# flannel_vxlan_port: 8472
+# flannel_vxlan_direct_routing: false
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
new file mode 100644
index 0000000..c241a76
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
@@ -0,0 +1,63 @@
+---
+
+# geneve or vlan
+kube_ovn_network_type: geneve
+
+# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled ovs kernel module
+kube_ovn_tunnel_type: geneve
+
+## The NIC to support the container network can be a NIC name or a group of regexes separated by commas, e.g. 'enp6s0f0,eth.*'; if empty, the NIC that the default route uses will be used.
+# kube_ovn_iface: eth1
+## The MTU used by pod iface in overlay networks (default iface MTU - 100)
+# kube_ovn_mtu: 1333
+
+## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false
+# traffic mirror
+kube_ovn_traffic_mirror: false
+
+# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+# kube_ovn_default_interface_name: eth0
+
+kube_ovn_external_address: 8.8.8.8
+kube_ovn_external_address_ipv6: 2400:3200::1
+kube_ovn_external_dns: alauda.cn
+
+# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
+kube_ovn_default_gateway_check: true
+kube_ovn_default_logical_gateway: false
+# kube_ovn_default_exclude_ips: 10.16.0.1
+kube_ovn_node_switch_cidr: 100.64.0.0/16
+kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
+
+## vlan config, set default interface name and vlan id
+# kube_ovn_default_interface_name: eth0
+kube_ovn_default_vlan_id: 100
+kube_ovn_vlan_name: product
+
+## pod nic type, support: veth-pair or internal-port
+kube_ovn_pod_nic_type: veth_pair
+
+## Enable load balancer
+kube_ovn_enable_lb: true
+
+## Enable network policy support
+kube_ovn_enable_np: true
+
+## Enable external vpc support
+kube_ovn_enable_external_vpc: true
+
+## Enable checksum
+kube_ovn_encap_checksum: true
+
+## enable ssl
+kube_ovn_enable_ssl: false
+
+## dpdk
+kube_ovn_dpdk_enabled: false
+
+## enable interconnection to an existing IC database server.
+kube_ovn_ic_enable: false
+kube_ovn_ic_autoroute: true
+kube_ovn_ic_dbhost: "127.0.0.1"
+kube_ovn_ic_zone: "kubernetes"
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
new file mode 100644
index 0000000..21947a9
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
@@ -0,0 +1,73 @@
+# See roles/network_plugin/kube-router/defaults/main.yml
+
+# Kube router version
+# Default to v2
+# kube_router_version: "v2.0.0"
+# Uncomment to use v1 (Deprecated)
+# kube_router_version: "v1.6.0"
+
+# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
+# kube_router_run_router: true
+
+# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
+# kube_router_run_firewall: true
+
+# Enables Service Proxy -- sets up IPVS for Kubernetes Services
+# see docs/kube-router.md "Caveats" section
+# kube_router_run_service_proxy: false
+
+# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_cluster_ip: false
+
+# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_external_ip: false
+
+# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_loadbalancer_ip: false
+
+# Enables BGP graceful restarts
+# kube_router_bgp_graceful_restart: true
+
+# Adjust manifest of kube-router daemonset template with DSR needed changes
+# kube_router_enable_dsr: false
+
+# Array of arbitrary extra arguments to kube-router, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
+# kube_router_extra_args: []
+
+# ASN number of the cluster, used when communicating with external BGP routers
+# kube_router_cluster_asn: ~
+
+# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
+# kube_router_peer_router_asns: ~
+
+# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidrs.
+# kube_router_peer_router_ips: ~
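+
+# Sketch (assumption; the ASNs and address are illustrative): peering every
+# node with one upstream router combines the knobs above, e.g.:
+# kube_router_cluster_asn: 64512
+# kube_router_peer_router_asns: 64513
+# kube_router_peer_router_ips: 192.168.122.1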
+
+# The remote port of the external BGP to which all nodes will peer. If not set, the default BGP port (179) will be used.
+# kube_router_peer_router_ports: ~
+
+# Sets up node CNI to allow hairpin mode, requires node reboots, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
+# kube_router_support_hairpin_mode: false
+
+# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
+# kube_router_dns_policy: ClusterFirstWithHostNet
+
+# Array of annotations for master
+# kube_router_annotations_master: []
+
+# Array of annotations for every node
+# kube_router_annotations_node: []
+
+# Array of common annotations for every node
+# kube_router_annotations_all: []
+
+# Enables scraping kube-router metrics with Prometheus
+# kube_router_enable_metrics: false
+
+# Path to serve Prometheus metrics on
+# kube_router_metrics_path: /metrics
+
+# Prometheus metrics port to use
+# kube_router_metrics_port: 9255
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
new file mode 100644
index 0000000..d2534e7
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
@@ -0,0 +1,6 @@
+---
+# private interface, on a l2-network
+macvlan_interface: "eth1"
+
+# Enable nat in default gateway network interface
+enable_nat_default_gateway: true
diff --git a/ansible/inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/ansible/inventory/group_vars/k8s_cluster/k8s-net-weave.yml
new file mode 100644
index 0000000..269a77c
--- /dev/null
+++ b/ansible/inventory/group_vars/k8s_cluster/k8s-net-weave.yml
@@ -0,0 +1,64 @@
+# see roles/network_plugin/weave/defaults/main.yml
+
+# Weave's network password for encryption, if null then no network encryption.
+# weave_password: ~
+
+# If set to 1, disable checking for new Weave Net versions (default is blank,
+# i.e. check is enabled)
+# weave_checkpoint_disable: false
+
+# Soft limit on the number of connections between peers. Defaults to 100.
+# weave_conn_limit: 100
+
+# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
+# for containers attached. If you need to disable hairpin, e.g. your kernel is
+# one of those that can panic if hairpin is enabled, then you can disable it by
+# setting `HAIRPIN_MODE=false`.
+# weave_hairpin_mode: true
+
+# The range of IP addresses used by Weave Net and the subnet they are placed in
+# (CIDR format; default 10.32.0.0/12)
+# weave_ipalloc_range: "{{ kube_pods_subnet }}"
+
+# Set to 0 to disable Network Policy Controller (default is on)
+# weave_expect_npc: "{{ enable_network_policy }}"
+
+# List of addresses of peers in the Kubernetes cluster (default is to fetch the
+# list from the api-server)
+# weave_kube_peers: ~
+
+# Set the initialization mode of the IP Address Manager (defaults to consensus
+# amongst the KUBE_PEERS)
+# weave_ipalloc_init: ~
+
+# Set the IP address used as a gateway from the Weave network to the host
+# network - this is useful if you are configuring the addon as a static pod.
+# weave_expose_ip: ~
+
+# Address and port that the Weave Net daemon will serve Prometheus-style
+# metrics on (defaults to 0.0.0.0:6782)
+# weave_metrics_addr: ~
+
+# Address and port that the Weave Net daemon will serve status requests on
+# (defaults to disabled)
+# weave_status_addr: ~
+
+# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
+# underlying network has a tighter limit, or set a larger size for better
+# performance if your network supports jumbo frames (e.g. 8916)
+# weave_mtu: 1376
+
+# Set to 1 to preserve the client source IP address when accessing Service
+# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
+# only with Weave IPAM (default).
+# weave_no_masq_local: true
+
+# set to nft to use nftables backend for iptables (default is iptables)
+# weave_iptables_backend: iptables
+
+# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
+# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
+# weave_extra_args: ~
+
+# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
+# weave_npc_extra_args: ~
diff --git a/ansible/inventory/group_vars/kubespray/docker.yaml b/ansible/inventory/group_vars/kubespray/docker.yaml
new file mode 100644
index 0000000..4e968c3
--- /dev/null
+++ b/ansible/inventory/group_vars/kubespray/docker.yaml
@@ -0,0 +1,59 @@
+---
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+# docker_storage_options: -s overlay2
+
+## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
+docker_container_storage_setup: false
+
+## You must define a disk path for docker_container_storage_setup_devs.
+## Otherwise docker-storage-setup will be executed incorrectly.
+# docker_container_storage_setup_devs: /dev/vdb
+
+## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
+## Valid options are systemd or cgroupfs, default is systemd
+# docker_cgroup_driver: systemd
+
+## Only set this if you have more than 3 nameservers:
+## If true Kubespray will only use the first 3, otherwise it will fail
+docker_dns_servers_strict: false
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## Used to set docker daemon iptables options to true
+docker_iptables_enabled: "false"
+
+# Docker log options
+# Rotate container stderr/stdout logs at 50m and keep last 5
+docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
+
+# define docker bin_dir
+docker_bin_dir: "/usr/bin"
+
+# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
+# kubespray deletes the docker package on each run, so caching the package makes sense
+docker_rpm_keepcache: 1
+
+## An obvious use case is allowing insecure-registry access to self hosted registries.
+## Can be ipaddress and domain_name.
+## example define 172.19.16.11 or mirror.registry.io
+# docker_insecure_registries:
+#   - mirror.registry.io
+#   - 172.19.16.11
+
+## Add other registries, for example a China registry mirror.
+# docker_registry_mirrors:
+#   - https://registry.docker-cn.com
+#   - https://mirror.aliyuncs.com
+
+## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which control whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for system default
+# docker_mount_flags:
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+# docker_options: ""
diff --git a/ansible/inventory/group_vars/kubespray/etcd.yaml b/ansible/inventory/group_vars/kubespray/etcd.yaml
new file mode 100644
index 0000000..39600c3
--- /dev/null
+++ b/ansible/inventory/group_vars/kubespray/etcd.yaml
@@ -0,0 +1,16 @@
+---
+## Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+## Container runtime
+## docker for docker, crio for cri-o and containerd for containerd.
+## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
+## Kubeadm etcd deployment is experimental and only available for new deployments
+## If this is not set, container manager will be inherited from the Kubespray defaults
+## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
+## This also makes it possible to use a different container manager for etcd nodes.
+# container_manager: containerd
+
+## Settings for etcd deployment type
+# Set this to docker if you are using container_manager: docker
+etcd_deployment_type: host
diff --git a/ansible/inventory/group_vars/kubespray/kubespray.yaml b/ansible/inventory/group_vars/kubespray/kubespray.yaml
new file mode 100644
index 0000000..c7f7628
--- /dev/null
+++ b/ansible/inventory/group_vars/kubespray/kubespray.yaml
@@ -0,0 +1,139 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"
+
+## Local loadbalancer should use this port
+## and it must be set to port 6443
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables a proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage.
+## Use this option with caution, you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail.
+# disable_host_nameservers: false
+
+## Upstream dns servers
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using openstack-client before starting the playbook.
diff --git a/ansible/inventory/group_vars/kubespray/kubespray.yaml b/ansible/inventory/group_vars/kubespray/kubespray.yaml
new file mode 100644
index 0000000..c7f7628
--- /dev/null
+++ b/ansible/inventory/group_vars/kubespray/kubespray.yaml
@@ -0,0 +1,139 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node, for example. The access_ip is really useful in AWS and Google
+## environments, where the nodes are accessed remotely by the "public" ip
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+## valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx
+
+## The local loadbalancer should use this port,
+## and it must be set to 6443.
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host and adds them to nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage; it still uses them to make sure the cluster installs safely in the dns_early stage.
+## Use this option with caution: you may need to define your own dns servers, otherwise outbound queries such as www.google.com may fail.
+# disable_host_nameservers: false
+
+## Upstream dns servers
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
+
+## There are some changes specific to the cloud providers;
+## for instance, we need to encapsulate packets with some network plugins.
+## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'.
+## When openstack is used, make sure to source the openstack credentials,
+## like you would when using openstack-client, before starting the playbook.
+# cloud_provider:
+
+## When cloud_provider is set to 'external', you can set the cloud controller to deploy.
+## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud'.
+## When openstack or vsphere are used, make sure to source the required fields.
+# external_cloud_provider:
+
+## Set these proxy values to make the package manager and docker daemon use proxies, and a custom CA for https_proxy if needed.
+# http_proxy: ""
+# https_proxy: ""
+# https_proxy_cert_file: ""
+
+## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy.
+# no_proxy: ""
+
+## Some problems may occur when downloading files over an https proxy due to an ansible bug,
+## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
+## SSL validation in the get_url module. Note that kubespray will still perform checksum validation.
+# download_validate_certs: False
+
+## If you need to exclude all cluster nodes from the proxy, along with other resources, add the other resources here.
+# additional_no_proxy: ""
+
+## If you need to disable proxying of os package repositories but are still behind an http_proxy, set
+## skip_http_proxy_on_os_packages to true.
+## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu.
+## Special information for debian/ubuntu: you have to set the no_proxy variable, then apt packages will install from the source you wish.
+# skip_http_proxy_on_os_packages: false
+
+## Since workers are included in the no_proxy variable by default, the docker engine will be restarted on all nodes (all
+## pods will restart) when adding or removing workers. To change this behaviour so that only master nodes are included in the
+## no_proxy variable, set the value below to true:
+no_proxy_exclude_workers: false
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts.
+## Choose 'none' if you provide your own certificates.
+## Options are "script" and "none".
+# cert_management: script
+
+## Set to true to allow pre-checks to fail and continue deployment
+# ignore_assert_errors: false
+
+## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
+# kube_read_only_port: 10255
+
+## Set to true to download and cache container images
+# download_container: true
+
+## Deploy container engine
+# Set to false if you want to deploy the container engine manually.
+# deploy_container_engine: true
+
+## Red Hat Enterprise Linux subscription registration
+## Add either the RHEL subscription Username/Password or Organization ID/Activation Key combination.
+## Update the RHEL subscription purpose usage, role and SLA if necessary.
+# rh_subscription_username: ""
+# rh_subscription_password: ""
+# rh_subscription_org_id: ""
+# rh_subscription_activation_key: ""
+# rh_subscription_usage: "Development"
+# rh_subscription_role: "Red Hat Enterprise Server"
+# rh_subscription_sla: "Self-Support"
+
+## Check if access_ip responds to ping. Set to false if your firewall blocks ICMP.
+# ping_access_ip: true
+
+# sysctl_file_path to add sysctl conf to
+# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+## Variables for webhook token auth, https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+kube_webhook_token_auth: false
+kube_webhook_token_auth_url_skip_tls_verify: false
+# kube_webhook_token_auth_url: https://...
+## base64-encoded string of the webhook's CA certificate
+# kube_webhook_token_auth_ca_data: "LS0t..."
+
+## NTP Settings
+# Start the ntpd or chrony service and enable it at system boot.
+ntp_enabled: false
+ntp_manage_config: false
+ntp_servers:
+  - "0.pool.ntp.org iburst"
+  - "1.pool.ntp.org iburst"
+  - "2.pool.ntp.org iburst"
+  - "3.pool.ntp.org iburst"
+
+## Used to control the no_log attribute
+unsafe_show_logs: false
+
+## If enabled, kubespray will attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures.
+allow_unsupported_distribution_setup: false
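Tying the load-balancer variables together: if an external LB fronted the apiservers, the commented example in kubespray.yaml suggests a stanza along these lines. The domain and address are the file's own placeholders, and loadbalancer_apiserver_port stays at 6443 as the comments require:

```yaml
# Sketch of an external apiserver load balancer, following the commented
# example above. "elb.some.domain" and 1.2.3.4 are placeholder values.
apiserver_loadbalancer_domain_name: "elb.some.domain"
loadbalancer_apiserver:
  address: 1.2.3.4
  port: 6443
```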
diff --git a/ansible/inventory/hosts.yaml b/ansible/inventory/hosts.yaml
index 6f1e525..f21e516 100644
--- a/ansible/inventory/hosts.yaml
+++ b/ansible/inventory/hosts.yaml
@@ -6,18 +6,66 @@ all:
       ansible_python_interpreter: "{{ python_interpreter }}"
     pxe-server:
       ipv4: 192.168.122.28
-    k8s-dev1-ctrl:
+    node1:
+      ip: 192.168.122.128
       ipv4: 192.168.122.128
-    k8s-dev1-worker1:
+      access_ip: 192.168.122.128
+      ansible_host: 192.168.122.128
+    node2:
+      ip: 192.168.122.129
       ipv4: 192.168.122.129
-    k8s-dev1-worker2:
+      access_ip: 192.168.122.129
+      ansible_host: 192.168.122.129
+    node3:
+      ip: 192.168.122.130
       ipv4: 192.168.122.130
+      access_ip: 192.168.122.130
+      ansible_host: 192.168.122.130
+    node4:
+      ip: 192.168.122.131
+      ipv4: 192.168.122.131
+      access_ip: 192.168.122.131
+      ansible_host: 192.168.122.131
+    node5:
+      ip: 192.168.122.132
+      ipv4: 192.168.122.132
+      access_ip: 192.168.122.132
+      ansible_host: 192.168.122.132
   children:
     libvirt:
       hosts:
-        k8s-dev1-ctrl:
-        k8s-dev1-worker1:
-        k8s-dev1-worker2:
+        node1:
+        node2:
+        node3:
+        node4:
+        node5:
+    kubernetes:
+      hosts:
+        node1:
+        node2:
+        node3:
+        node4:
+        node5:
+    kubemasters:
+      hosts:
+        node1:
+        node2:
+        node3:
+    k8s_cluster:
+      children:
+        kubernetes:
+    kube_node:
+      children:
+        kubernetes:
+    kube_control_plane:
+      children:
+        kubemasters:
+    etcd:
+      children:
+        kubemasters:
+    kubespray:
+      children:
+        kubernetes:
     admin:
       hosts:
         boogie:
diff --git a/ansible/roles/libvirt/tasks/main.yaml b/ansible/roles/libvirt/tasks/main.yaml
index dc35ce4..605fc37 100644
--- a/ansible/roles/libvirt/tasks/main.yaml
+++ b/ansible/roles/libvirt/tasks/main.yaml
@@ -56,6 +56,12 @@
         state: running
       delegate_to: "{{ vm_host }}"
       when: pxe_done
+    - name: Pause execution until the Debian installation is complete
+      ansible.builtin.pause:
+        prompt: |
+          Wait for the Debian installation to complete, then press Enter to continue.
+      delegate_to: "{{ vm_host }}"
+      when: pxe_done
   when: state == 'present'

# If the state for the current host is set to `absent` in the `state.ini` file:
diff --git a/ansible/site.yaml b/ansible/site.yaml
index ffd05b6..29d35c9 100644
--- a/ansible/site.yaml
+++ b/ansible/site.yaml
@@ -25,3 +25,6 @@
     - role: libvirt
       vars:
         pxe_done: true
+
+- name: Install Kubernetes with Kubespray
+  ansible.builtin.import_playbook: kubespray/playbooks/cluster.yml
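The pause task added to roles/libvirt/tasks/main.yaml blocks until an operator confirms that the Debian installs have finished. A non-interactive variant could instead poll SSH on each VM from the hypervisor; a sketch, with assumed delay and timeout values:

```yaml
# Hypothetical replacement for the manual pause: wait until the installed
# system is reachable over SSH. Assumes the installer reboots into a system
# with sshd enabled; the delay/timeout values are guesses.
- name: Wait for the Debian installation to complete
  ansible.builtin.wait_for:
    host: "{{ ipv4 }}"   # per-host address from inventory/hosts.yaml
    port: 22
    delay: 60
    timeout: 3600
  delegate_to: "{{ vm_host }}"
  when: pxe_done
```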