diff --git a/playbooks/reconfigure_control_plane.yml b/playbooks/reconfigure_control_plane.yml
new file mode 100644
index 00000000000..ada66eaf7b1
--- /dev/null
+++ b/playbooks/reconfigure_control_plane.yml
@@ -0,0 +1,25 @@
+---
+- name: Common tasks for every playbooks
+  import_playbook: boilerplate.yml
+
+- name: Gather facts
+  import_playbook: facts.yml
+
+- name: Tasks for reconfigure control plane
+  hosts: kube_control_plane
+  gather_facts: false
+  pre_tasks:
+    - name: Include vars from `kubespray-defaults` main
+      run_once: true
+      ansible.builtin.include_vars:
+        dir: "{{ playbook_dir }}/../roles/kubespray-defaults/defaults/main"
+    - name: Include vars from `kubespray-defaults` vars
+      run_once: true
+      ansible.builtin.include_vars:
+        dir: "{{ playbook_dir }}/../roles/kubespray-defaults/vars"
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  vars:
+    skip_downloads: true
+  roles:
+    - { role: kubernetes/control-plane, tags: ['facts'] }
+    - { role: kubernetes/reconfigure-control-plane }
diff --git a/reconfigure-control-plane.yml b/reconfigure-control-plane.yml
new file mode 100644
index 00000000000..0ceb3c33e10
--- /dev/null
+++ b/reconfigure-control-plane.yml
@@ -0,0 +1,3 @@
+---
+- name: Reconfigure control plane
+  ansible.builtin.import_playbook: playbooks/reconfigure_control_plane.yml
diff --git a/roles/kubernetes/reconfigure-control-plane/defaults/main.yml b/roles/kubernetes/reconfigure-control-plane/defaults/main.yml
new file mode 100644
index 00000000000..c06f3792efc
--- /dev/null
+++ b/roles/kubernetes/reconfigure-control-plane/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+kube_config_dir: "/etc/kubernetes"
+kube_apiserver_port: "6443"
+kube_reconfig_dir: "{{ kube_config_dir }}/reconfigure"
+cm_name: "kubeadm-config"
+cm_namespace: "kube-system"
+# Boolean: true only on the first control-plane host. NOTE(review): previously
+# the literal string "groups['kube_control_plane'][0]", which is truthy everywhere.
+first_master_node: "{{ inventory_hostname == groups['kube_control_plane'][0] }}"
+cm_cluster_config: "{{ kube_reconfig_dir }}/cm-cluster-config.yaml"
diff --git a/roles/kubernetes/reconfigure-control-plane/tasks/main.yml
b/roles/kubernetes/reconfigure-control-plane/tasks/main.yml
new file mode 100644
index 00000000000..ec453524f85
--- /dev/null
+++ b/roles/kubernetes/reconfigure-control-plane/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+- name: Ensure all nodes are in Ready state
+  run_once: true
+  block:
+    - name: Get all node status
+      raw: "{{ bin_dir }}/kubectl get nodes --no-headers"
+      register: all_nodes_status
+
+    - name: Get all node status different than Ready
+      raw: echo "{{ all_nodes_status.stdout }}" | grep -v ' Ready' || true
+      register: nodes_status
+
+    - name: Show not ready nodes
+      debug: msg="{{ nodes_status.stdout_lines }}"
+      when: nodes_status.stdout != "\r\n"
+
+    - name: Confirm Reconfigure
+      pause:
+        prompt: "Some nodes are not in Ready state. Type 'yes' to continue with reconfigure"
+      register: pause_result
+      when: nodes_status.stdout != "\r\n"
+
+    - name: Fail if user does not confirm reconfigure
+      fail:
+        msg: "Reconfigure control plane will not continue"
+      when: pause_result.user_input | default('yes') != 'yes'
+
+- name: Check API is up
+  uri:
+    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
+    validate_certs: false
+  when: inventory_hostname == groups['kube_control_plane'][0]
+  register: _result
+  retries: 60
+  delay: 5
+  until: _result.status == 200
+
+- name: Check if kubeadm has already run
+  stat:
+    path: "/var/lib/kubelet/config.yaml"
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
+  register: kubeadm_already_run
+
+- name: Set reconfig path
+  set_fact:
+    kube_reconfig_path: "{{ kube_reconfig_dir }}/cluster-config-{{ now(utc=true, fmt='%Y%m%d-%H%M%S') }}.yaml"
+
+- name: Backup Kubernetes configuration files
+  archive:
+    path: "{{ kube_config_dir }}"
+    dest: "{{ kube_config_dir }}/kubernetes-config-{{ ansible_date_time.date }}.tar.gz"
+    format: gz
+
+- name: Create backup directory if it doesn't exist
+  file:
+    path: "{{ kube_reconfig_dir }}"
+    state: directory
+    mode: '0755'
+    owner: root
+    group: root
+
+- name: Backup ConfigMap
+  when: inventory_hostname == groups['kube_control_plane'][0]
+  run_once: true
+  raw: "{{ bin_dir }}/kubectl get cm -n {{ cm_namespace }} {{ cm_name }} -o yaml > {{ kube_reconfig_path }}.bk"
+  register: cm_backup
+  failed_when: cm_backup.rc != 0
+
+- name: Delete ConfigMap
+  when: inventory_hostname == groups['kube_control_plane'][0] and cm_backup.rc == 0
+  run_once: true
+  raw: "{{ bin_dir }}/kubectl delete cm -n {{ cm_namespace }} {{ cm_name }}"
+  register: cm_delete
+  failed_when: cm_delete.rc != 0
+
+- name: Create reconfigure kubeadm config
+  template:
+    src: "{{ playbook_dir }}/../roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2"
+    dest: "{{ kube_reconfig_path }}"
+    mode: "0640"
+
+- name: Fetch kubeadm config to control node
+  fetch:
+    src: "{{ kube_reconfig_path }}"
+    dest: "/tmp/"
+    flat: true
+
+- name: Load kubeadm config
+  become: true
+  set_fact:
+    cluster_config_manifest: "{{ lookup('file', '/tmp/' + (kube_reconfig_path | basename)) | from_yaml_all | list }}"
+
+- name: Save cluster configuration
+  copy:
+    content: "{{ cluster_config_manifest | selectattr('kind', 'eq', 'ClusterConfiguration') | first | to_yaml }}"
+    dest: "{{ item }}"
+  loop:
+    - "{{ cm_cluster_config }}"
+    - "{{ kube_reconfig_path }}"
+
+- name: Prepare ClusterConfiguration configmap
+  run_once: true
+  when: inventory_hostname == groups['kube_control_plane'][0]
+  lineinfile:
+    path: "{{ cm_cluster_config }}"
+    state: absent
+    regexp: "{{ item }}"
+  loop:
+    - "^apiVersion:"
+    - "^kind: ClusterConfiguration"
+
+- name: Create new configmap for ClusterConfiguration
+  raw: "{{ bin_dir }}/kubectl create configmap -n {{ cm_namespace }} {{ cm_name }} --from-file=ClusterConfiguration={{ cm_cluster_config }}"
+  run_once: true
+  when: inventory_hostname == groups['kube_control_plane'][0]
+
+- name: Reconfigure control-plane
+  when:
+    - kubeadm_already_run.stat.exists
+  run_once: true
+  # We'll iterate over each node in the kube_control_plane group:
+  loop: "{{ groups['kube_control_plane'] }}"
+  loop_control:
+    loop_var: cp_node
+  include_tasks: reconfigure_block.yml
diff --git
a/roles/kubernetes/reconfigure-control-plane/tasks/reconfigure_block.yml b/roles/kubernetes/reconfigure-control-plane/tasks/reconfigure_block.yml
new file mode 100644
index 00000000000..f5a7461b25c
--- /dev/null
+++ b/roles/kubernetes/reconfigure-control-plane/tasks/reconfigure_block.yml
@@ -0,0 +1,43 @@
+---
+- name: Reconfigure control plane components on {{ cp_node }}
+  block:
+    - name: Write new manifest for kubernetes control plane components
+      raw: "kubeadm init phase control-plane all --config {{ kube_reconfig_path }}"
+
+    - name: Reconfigure local etcd
+      raw: "kubeadm init phase etcd local --config {{ kube_reconfig_path }}"
+
+    - name: Delete kube-apiserver pod
+      raw: "{{ bin_dir }}/kubectl delete pod -n kube-system kube-apiserver-{{ hostvars[cp_node].ansible_hostname }}"
+      ignore_errors: true
+
+    - name: Wait for kube-apiserver pod to be running
+      raw: "{{ bin_dir }}/kubectl get pod -n kube-system kube-apiserver-{{ hostvars[cp_node].ansible_hostname }} -o jsonpath='{.status.phase}'"
+      register: check_apiserver
+      retries: 10
+      delay: 30
+      until: check_apiserver.stdout == 'Running'
+
+    - name: Delete kube-controller-manager pod
+      raw: "{{ bin_dir }}/kubectl delete pod -n kube-system kube-controller-manager-{{ hostvars[cp_node].ansible_hostname }}"
+      ignore_errors: true
+
+    - name: Wait for kube-controller-manager pod to be running
+      raw: "{{ bin_dir }}/kubectl get pod -n kube-system kube-controller-manager-{{ hostvars[cp_node].ansible_hostname }} -o jsonpath='{.status.phase}'"
+      register: check_controller_manager
+      retries: 10
+      delay: 30
+      until: check_controller_manager.stdout == 'Running'
+
+    - name: Delete kube-scheduler pod
+      raw: "{{ bin_dir }}/kubectl delete pod -n kube-system kube-scheduler-{{ hostvars[cp_node].ansible_hostname }}"
+      ignore_errors: true
+
+    - name: Wait for kube-scheduler pod to be running
+      raw: "{{ bin_dir }}/kubectl get pod -n kube-system kube-scheduler-{{ hostvars[cp_node].ansible_hostname }} -o jsonpath='{.status.phase}'"
+      register: check_scheduler
+      retries: 10
+      delay: 30
+      until: check_scheduler.stdout == 'Running'
+  delegate_to: "{{ cp_node }}"
+  when: kubeadm_already_run.stat.exists | default(false)