diff --git a/README.md b/README.md index 7bf81a3..eb86f42 100644 --- a/README.md +++ b/README.md @@ -17,20 +17,31 @@ What ansible-kubeadm expect to be done and will not do: - remove unattented-upgrade - configure CNI + ## Quickstart see [Quickstart](docs/quickstart.md) + ## Configuration If you want a customized (ansible-)kubeadm experience there is a number of variables you can use: [Variables reference](docs/variables.md) + +## Guides + +Some operations have their own guide page: + +- [join nodes](docs/guides/join_nodes.md) + + ## Flow If you're looking for what ansible-kubeadm is doing step-by-step, [hooks && plugins](docs/hooks_and_plugins.md) is a good way to start. + ## Migration planning Long term migration plan, [*] to indicate current phase diff --git a/docs/guides/join_nodes.md b/docs/guides/join_nodes.md new file mode 100644 index 0000000..f323c42 --- /dev/null +++ b/docs/guides/join_nodes.md @@ -0,0 +1,56 @@ +# To join worker-only nodes + +**Note**: For control plane nodes, see dedicated [section](join_nodes.md#to-join-control-plane-nodes) + +Let's assume that you have a cluster with two worker nodes and that you want to add a third node `node-3`. +You can join multiple worker nodes at once with this procedure. + +### Add node to the inventory + +First, add the node to the inventory, as in the following example: + +``` +[kube_control_plane] +cp-1 +cp-2 +cp-3 + +[kube_workers] +node-1 +node-2 +node-3 +``` + + +### [optional] Deploy local apiserver proxy + +If you haven't provisioned a load-balancer and require the local haproxy to be deployed: + +``` +ansible-playbook -i inventory enix.kubeadm.00_apiserver_proxy -e limit=node-3 +``` +You need to specify the `limit` variable via "extra-vars", because `-l` cannot really work in the context of ansible-kubeadm +(you need to connect to all the masters to get the IP needed to configure the loadbalancer) + +### Joining nodes + +You can join a node and skip other changes on other nodes by specifying the `limit` variable. 
+ +``` +ansible-playbook -i inventory.cfg enix.kubeadm.01_site -e limit=node-3 +``` + + + +### Create bootstrap-token + +Then create a bootstrap token by using the `bootstrap_token` tag. +Don't use a limit that skips control plane nodes. + +``` +ansible-playbook -i inventory.cfg enix.kubeadm.01_site -t bootstrap_token +``` + +No need to retrieve it by yourself, it will be discovered when joining the node. +The token has a validity of 1H, so you don't need to repeat this step each time you try to join nodes. + diff --git a/playbooks/00_apiserver_proxy.yml b/playbooks/00_apiserver_proxy.yml index 7aba9db..be59d83 100644 --- a/playbooks/00_apiserver_proxy.yml +++ b/playbooks/00_apiserver_proxy.yml @@ -13,7 +13,7 @@ roles: - role: find_ip -- hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' +- hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' pre_tasks: - include_role: @@ -31,7 +31,7 @@ vars: kubeadm_hook_list: ['post_apiserver_proxy'] -- hosts: 'haproxy_upgrade_group:&{{ kube_cp_group|default("kube_control_plane") }}' +- hosts: 'haproxy_upgrade_group:&{{ kube_cp_group|default("kube_control_plane") }}{{ ":" ~ limit if limit is defined else "" }}' serial: '{{ upgrade_cp_serial|default(1) }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' pre_tasks: @@ -47,7 +47,7 @@ vars: kubeadm_hook_list: ['post_proxy_upgrade_haproxy'] -- hosts: 'haproxy_upgrade_group:&{{ kube_worker_group|default("kube_workers") }}' +- hosts: 'haproxy_upgrade_group:&{{ kube_worker_group|default("kube_workers") }}{{ ":" ~ limit if limit is defined else "" }}' serial: '{{ upgrade_worker_serial|default(1) }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' pre_tasks: diff --git a/playbooks/01_site.yml b/playbooks/01_site.yml index f54388a..41f85e7 100644 --- 
a/playbooks/01_site.yml +++ b/playbooks/01_site.yml @@ -25,7 +25,7 @@ vars: kubeadm_hook_list: ['post_preflight_cp'] -- hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' +- hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' roles: - role: find_ip @@ -42,7 +42,7 @@ roles: - role: process_reasons -- hosts: '{{ kube_cp_group|default("kube_control_plane") }}' +- hosts: '{{ kube_cp_group|default("kube_control_plane") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' gather_facts: false roles: @@ -82,6 +82,8 @@ kubeadm_hook_list: ['pre_config_update'] roles: - role: bootstrap_token + tags: ['bootstrap_token'] + - role: upload_certs - role: kubeadm_configs_update tasks: - include_role: @@ -90,7 +92,7 @@ kubeadm_hook_list: ['post_config_update'] # This has to be overly cautious on package upgade -- hosts: cp_upgrade +- hosts: 'cp_upgrade{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' gather_facts: false pre_tasks: @@ -116,7 +118,7 @@ # Upgrade conrol-plane nodes - name: 'Upgrade to control plane nodes' - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:&nodes_upgrade' + hosts: '{{ kube_cp_group|default("kube_control_plane") }}:&nodes_upgrade{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' serial: '{{ upgrade_cp_serial|default(1) }}' gather_facts: false @@ -145,7 +147,7 @@ # Upgrade worker nodes - name: 'Upgrade to workers nodes' - hosts: '{{ kube_worker_group|default("kube_workers") }}:&nodes_upgrade' + hosts: '{{ kube_worker_group|default("kube_workers") }}:&nodes_upgrade{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ 
any_errors_fatal|default(true) }}' serial: '{{ upgrade_worker_serial|default(1) }}' gather_facts: false @@ -172,7 +174,7 @@ # Join control-plane nodes - name: 'Join new control plane nodes' - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' + hosts: '{{ kube_cp_group|default("kube_control_plane") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' gather_facts: false vars: @@ -196,9 +198,10 @@ # Join worker nodes - name: 'Join new workers nodes' - hosts: '{{ kube_worker_group|default("kube_workers") }}' + hosts: '{{ kube_worker_group|default("kube_workers") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' gather_facts: false + tags: ['join'] pre_tasks: - include_role: name: hooks_call @@ -215,7 +218,7 @@ kubeadm_hook_list: ['post_workers_join', 'post_nodes_join'] - name: 'Finally executing post_run hook on all hosts' - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' + hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}{{ ":" ~ limit if limit is defined else "" }}' any_errors_fatal: '{{ any_errors_fatal|default(true) }}' gather_facts: false tasks: diff --git a/roles/bootstrap_token/defaults/main.yml b/roles/bootstrap_token/defaults/main.yml index 930611f..65d96ef 100644 --- a/roles/bootstrap_token/defaults/main.yml +++ b/roles/bootstrap_token/defaults/main.yml @@ -1,8 +1,2 @@ --- -sensitive_debug: false -cluster_config: {} - -kubeadm_config_yaml: '/tmp/kubeadm-config-{{ansible_date_time.iso8601 }}.yaml' - -python2_openssl: python-openssl -python3_openssl: python3-openssl +_valid_bootstrap_tokens: [] diff --git a/roles/bootstrap_token/meta/main.yml b/roles/bootstrap_token/meta/main.yml index 9589dd3..85b97f8 100644 --- a/roles/bootstrap_token/meta/main.yml +++ b/roles/bootstrap_token/meta/main.yml @@ -1,5 +1,6 @@ --- dependencies: + - 
role: common_vars - role: kubectl_module galaxy_info: author: Julien Girardin diff --git a/roles/bootstrap_token/tasks/bootstrap_token.yml b/roles/bootstrap_token/tasks/bootstrap_token.yml deleted file mode 100644 index 3172c21..0000000 --- a/roles/bootstrap_token/tasks/bootstrap_token.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- name: 'Get list of bootstrap token' - kubectl: - state: get - resource_type: secret - namespace: kube-system - extra_args: '--field-selector=type=bootstrap.kubernetes.io/token' - kubeconfig: /etc/kubernetes/admin.conf - run_once: true - delegate_to: '{{ kubeadm_host }}' - register: bootstrap_tokens - when: - - not(found_kubectl.rc == 1 and ansible_check_mode) - -- name: 'Display all bootstrap tokens' - debug: - var: bootstrap_tokens - verbosity: 1 - run_once: true - -- name: 'Filter expire token' - set_fact: - valid_bootstrap_tokens: >- - {%- if ansible_collection_name is defined and ansible_collection_name is not none -%} - {%- set filter_name = "enix.kubeadm.bootstrap_token_valid" -%} - {%- else -%} - {%- set filter_name = "bootstrap_token_valid" -%} - {%- endif -%} - {{ [bootstrap_tokens_dry_run["items"] - |selectattr('data.usage-bootstrap-authentication', 'defined')|list] - |map(filter_name)|first }} - run_once: true - vars: - # "items" cannot be defaulted easily as jinja fallback on using method instead - bootstrap_tokens_dry_run: "{{ {'items': []}|combine(bootstrap_tokens) }}" - -- name: 'Display valid bootstrap tokens' - debug: - var: valid_bootstrap_tokens - verbosity: 1 - run_once: true diff --git a/roles/bootstrap_token/tasks/main.yaml b/roles/bootstrap_token/tasks/main.yaml index a380d4f..7bd2ce3 100644 --- a/roles/bootstrap_token/tasks/main.yaml +++ b/roles/bootstrap_token/tasks/main.yaml @@ -1,83 +1,28 @@ --- -- name: 'Select candidate host to run init' +- name: 'Find nodes to join' set_fact: - kubeadm_host: '{{ groups.cp_running|default(ansible_play_hosts, true)|first }}' - -- name: 'Retrieve a valid bootstrap token' - 
import_tasks: bootstrap_token.yml - -- name: 'Create bootstrap token if no valid found' - command: kubeadm token create - run_once: true - delegate_to: '{{ kubeadm_host }}' - when: valid_bootstrap_tokens|length == 0 - -- name: 'Retrieve a valid bootstrap token' - import_tasks: bootstrap_token.yml - when: valid_bootstrap_tokens|length == 0 - -# TODO: fix two following tasks to be more platform dependent -- name: 'Install python-openssl' - package: - name: >- - {%- if ansible_python.version.major > 2 -%} - {{ python3_openssl }} - {%- else -%} - {{ python2_openssl }} - {%- endif -%} - state: present - run_once: true - delegate_to: '{{ kubeadm_host }}' - -- name: 'Get info from ca' - openssl_certificate_info: - path: /etc/kubernetes/pki/ca.crt - run_once: true - delegate_to: '{{ kubeadm_host }}' - register: ca_info - when: not(groups.cp_init is defined and ansible_check_mode) - -- name: 'Display Kubernetes CA(cert) properties' - debug: - var: ca_info - verbosity: 1 - run_once: true - -- name: 'List current nodes' - kubectl: - state: get - resource_type: nodes - kubeconfig: /etc/kubernetes/admin.conf - run_once: true - delegate_to: '{{ kubeadm_host }}' - register: current_nodes - when: - - not(found_kubectl.rc == 1 and ansible_check_mode) - -- name: 'Compute list of "to-join" nodes' - set_fact: - # "items" cannot be defaulted easily as jinja fallback on using method instead - to_join_cp: >- - {{ ansible_play_hosts|difference( - ({"items": []}|combine(current_nodes))["items"]|map(attribute="metadata.name")) }} - cert_encryption_key: >- - {{ lookup('password', '/dev/null length=64 chars=hexdigits') }} - run_once: true - -- name: 'Display list of node that need to be joined' - debug: - var: to_join_cp - verbosity: 1 - run_once: true - -- name: 'Upload certificates if control-plane node need to be joined' - command: >- - kubeadm init phase upload-certs - --upload-certs - --certificate-key {{ cert_encryption_key }} - environment: - KUBECONFIG: '/etc/kubernetes/admin.conf' - 
no_log: '{{ sensitive_debug|bool }}' - run_once: true - delegate_to: '{{ kubeadm_host }}' - when: to_join_cp|length > 0 + nodes_to_join: >- + {{ q('inventory_hostnames', kube_cp_group ~ ':' ~ kube_worker_group) + |map('extract', hostvars) + |selectattr('_kubelet_config_stat', 'defined') + |rejectattr('_kubelet_config_stat.stat.exists') + |map(attribute='inventory_hostname')|list }} + run_once: true + +- name: 'Create bootstrap token' + when: nodes_to_join|length > 0 + block: + - name: 'Retrieve a valid bootstrap token' + import_role: + name: bootstrap_token_get + + - name: 'Create bootstrap token if no valid found' + command: kubeadm token create + run_once: true + delegate_to: '{{ cp_node }}' + when: _valid_bootstrap_tokens|length == 0 + + - name: 'Retrieve a valid bootstrap token' + import_role: + name: bootstrap_token_get + when: _valid_bootstrap_tokens|length == 0 diff --git a/roles/bootstrap_token_get/meta/main.yml b/roles/bootstrap_token_get/meta/main.yml new file mode 100644 index 0000000..97a58b9 --- /dev/null +++ b/roles/bootstrap_token_get/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common_vars diff --git a/roles/bootstrap_token_get/tasks/main.yml b/roles/bootstrap_token_get/tasks/main.yml new file mode 100644 index 0000000..f45e857 --- /dev/null +++ b/roles/bootstrap_token_get/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: 'Fetch bootstrap token' + run_once: true + delegate_to: '{{ cp_node }}' + block: + - name: 'Get list of bootstrap token' + kubectl: + state: get + resource_type: secret + namespace: kube-system + extra_args: '--field-selector=type=bootstrap.kubernetes.io/token' + kubeconfig: /etc/kubernetes/admin.conf + register: _bootstrap_tokens + when: + - not(groups.cp_running|default([])|length == 0 and ansible_check_mode) + + - name: 'Display all bootstrap tokens' + debug: + var: _bootstrap_tokens + verbosity: 1 + + - name: 'Filter expire token' + set_fact: + _valid_bootstrap_tokens: >- + {%- if ansible_collection_name is defined 
and ansible_collection_name is not none -%} + {%- set filter_name = "enix.kubeadm.bootstrap_token_valid" -%} + {%- else -%} + {%- set filter_name = "bootstrap_token_valid" -%} + {%- endif -%} + {{ [bootstrap_tokens_dry_run["items"] + |selectattr('data.usage-bootstrap-authentication', 'defined')|list] + |map(filter_name)|first }} + vars: + # "items" cannot be defaulted easily as jinja fallback on using method instead + bootstrap_tokens_dry_run: "{{ {'items': []}|combine(_bootstrap_tokens) }}" + + - name: 'Display valid bootstrap tokens' + debug: + var: _valid_bootstrap_tokens + verbosity: 1 diff --git a/roles/common_vars/defaults/main.yml b/roles/common_vars/defaults/main.yml index 188eef5..a7ffa0a 100644 --- a/roles/common_vars/defaults/main.yml +++ b/roles/common_vars/defaults/main.yml @@ -1,4 +1,5 @@ --- +_kubelet_config_path: /var/lib/kubelet/config.yaml enable_kubeadm_patches: true kubeadm_ignore_preflight_errors: [] kubeadm_patch_dir: /etc/kubernetes/patches @@ -6,3 +7,6 @@ kube_cp_group: kube_control_plane kube_worker_group: kube_workers cp_node: '{{ (groups.cp_running|default(groups[kube_cp_group]))|first }}' + +_target_kube_version: '{{ hostvars[cp_node]._target_kube_version }}' +_target_kubeadm_version: '{{ hostvars[cp_node]._target_kubeadm_version }}' diff --git a/roles/join_nodes/defaults/main.yml b/roles/join_nodes/defaults/main.yml index eb8f331..e7e8c19 100644 --- a/roles/join_nodes/defaults/main.yml +++ b/roles/join_nodes/defaults/main.yml @@ -1,2 +1,3 @@ --- _control_plane: false +_kubernetes_ca_cert: /etc/kubernetes/pki/ca.crt diff --git a/roles/join_nodes/meta/main.yml b/roles/join_nodes/meta/main.yml index d8049d6..4534c20 100644 --- a/roles/join_nodes/meta/main.yml +++ b/roles/join_nodes/meta/main.yml @@ -2,6 +2,9 @@ dependencies: - role: kubectl_module - role: common_vars + - role: bootstrap_token_get + delegate_to: '{{ cp_node }}' + run_once: true galaxy_info: author: Julien Girardin description: Join node to a kubernetes cluster diff --git 
a/roles/join_nodes/tasks/ca_info.yml b/roles/join_nodes/tasks/ca_info.yml new file mode 100644 index 0000000..ed195d8 --- /dev/null +++ b/roles/join_nodes/tasks/ca_info.yml @@ -0,0 +1,14 @@ +--- +- name: "Extract public key from kubernetes CA" + command: openssl x509 -noout -pubkey -in {{ _kubernetes_ca_cert }} + check_mode: false + changed_when: false + delegate_to: '{{ cp_node }}' + run_once: true + register: _kubernetes_ca_fingerprint + +- name: "Compute sha256 of fingerprint" + set_fact: + ca_cert_hash: >- + {{ _kubernetes_ca_fingerprint.stdout|regex_replace('[- A-Z]+\n([+/\w\n]+)\n[- A-Z]+', '\g<1>')|b64decode|hash('sha256') }} + run_once: true diff --git a/roles/join_nodes/tasks/main.yml b/roles/join_nodes/tasks/main.yml index d28eca6..f148362 100644 --- a/roles/join_nodes/tasks/main.yml +++ b/roles/join_nodes/tasks/main.yml @@ -1,8 +1,6 @@ --- - name: 'Retrieve variable from control-plane' set_fact: - ca_info: '{{ hostvars[cp_node].ca_info }}' - valid_bootstrap_tokens: '{{ hostvars[cp_node].valid_bootstrap_tokens }}' control_plane_endpoint: '{{ hostvars[cp_node].control_plane_endpoint }}' _control_plane: '{{ _control_plane }}' @@ -10,21 +8,9 @@ include_role: name: find_ip -- name: 'List all node' - kubectl: - state: get - resource_type: nodes - kubeconfig: /etc/kubernetes/admin.conf - run_once: true - delegate_to: '{{ cp_node }}' - register: current_nodes - when: - - not(hostvars[cp_node].found_kubectl.rc == 1 and ansible_check_mode) - -- name: 'Display current node' - debug: - var: current_nodes - verbosity: 1 +- name: "Get CA fingerprint" + import_tasks: ca_info.yml + when: not(groups.cp_running|default([])|length == 0 and ansible_check_mode) - name: 'Display JoinConfig' debug: @@ -42,11 +28,8 @@ {%- endif -%} args: stdin: '{{ lookup("template", role_path ~ "/templates/join_config.j2") }}' + creates: '{{ _kubelet_config_path }}' register: kubeadm_node_join - when: ansible_nodename not in nodes_list - vars: - # "items" cannot be defaulted easily as jinja 
fallback on using method instead - nodes_list: "{{ ({'items': []}|combine(current_nodes))['items']|map(attribute='metadata.name')|list }}" - name: 'Display output of "kubeadm join"' debug: diff --git a/roles/join_nodes/templates/join_config.j2 b/roles/join_nodes/templates/join_config.j2 index e12ae60..eaf6317 100644 --- a/roles/join_nodes/templates/join_config.j2 +++ b/roles/join_nodes/templates/join_config.j2 @@ -1,8 +1,8 @@ -{%- if valid_bootstrap_tokens|length == 0 -%} +{%- if _valid_bootstrap_tokens|length == 0 -%} {#- Use a dummy token for dry-run if it didn't exist -#} {%- set token_data = {"token-id": "ZHJ5LXJ1biB0b2tlbi1pZAo=", "token-secret": "ZHJ5LXJ1biB0b2tlbi1zZWNyZXQK"} -%} {%- else -%} -{%- set token_data = (valid_bootstrap_tokens|first).data -%} +{%- set token_data = (_valid_bootstrap_tokens|first).data -%} {%- endif -%} --- {% if _target_kube_version is version("1.22", ">=") -%} @@ -15,7 +15,7 @@ discovery: bootstrapToken: token: "{{ token_data['token-id']|b64decode }}.{{ token_data['token-secret']|b64decode }}" caCertHashes: - - "sha256:{{ (ca_info.public_key_fingerprints.sha256|default('dry-run-sha256')).replace(':', '') }}" + - "sha256:{{ (ca_cert_hash|default('dry-run-sha256')) }}" {% if control_plane_endpoint %} apiServerEndpoint: "{{ control_plane_endpoint }}" {% else %} diff --git a/roles/packages/meta/main.yml b/roles/packages/meta/main.yml index 94c1f15..6e32029 100644 --- a/roles/packages/meta/main.yml +++ b/roles/packages/meta/main.yml @@ -1,19 +1,4 @@ --- dependencies: - role: packages_common -galaxy_info: - author: Julien Girardin - description: Install kubernetes related packages - company: Enix - license: Apache - min_ansible_version: 2.7 - platforms: - - name: Ubuntu - versions: - - 18.04 - - 20.04 - galaxy_tags: - - kubernetes - - kubeadm - - kubelet - - kubectl + - role: common_vars diff --git a/roles/preflight_check_nodes/defaults/main.yml b/roles/preflight_check_nodes/defaults/main.yml index ec9f6b9..fd8a40c 100644 --- 
a/roles/preflight_check_nodes/defaults/main.yml +++ b/roles/preflight_check_nodes/defaults/main.yml @@ -1,5 +1,3 @@ --- _upgrade_reasons: {} _failure_reasons: {} - -kubelet_config_path: /var/lib/kubelet/config.yaml diff --git a/roles/preflight_check_nodes/tasks/check_kubelet_config.yml b/roles/preflight_check_nodes/tasks/check_kubelet_config.yml index 45c560b..2eacdf9 100644 --- a/roles/preflight_check_nodes/tasks/check_kubelet_config.yml +++ b/roles/preflight_check_nodes/tasks/check_kubelet_config.yml @@ -1,12 +1,12 @@ --- - name: 'Test if local kubelet config exists' stat: - path: '{{ kubelet_config_path }}' + path: '{{ _kubelet_config_path }}' register: _kubelet_config_stat - name: 'Fetch local kubelet config if exist' slurp: - path: '{{ kubelet_config_path }}' + path: '{{ _kubelet_config_path }}' register: _kubelet_config_fetch when: _kubelet_config_stat.stat.exists diff --git a/roles/upload_certs/defaults/main.yml b/roles/upload_certs/defaults/main.yml new file mode 100644 index 0000000..1cd4e8f --- /dev/null +++ b/roles/upload_certs/defaults/main.yml @@ -0,0 +1,2 @@ +--- +sensitive_debug: false diff --git a/roles/upload_certs/meta/main.yml b/roles/upload_certs/meta/main.yml new file mode 100644 index 0000000..97a58b9 --- /dev/null +++ b/roles/upload_certs/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common_vars diff --git a/roles/upload_certs/tasks/main.yml b/roles/upload_certs/tasks/main.yml new file mode 100644 index 0000000..ba2e6f3 --- /dev/null +++ b/roles/upload_certs/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: 'Find nodes to join' + set_fact: + _cp_to_join: >- + {{ q('inventory_hostnames', kube_cp_group) + |map('extract', hostvars) + |rejectattr('_kubelet_config_stat.stat.exists') + |map(attribute='inventory_hostname')|list }} + cert_encryption_key: >- + {{ lookup('password', '/dev/null length=64 chars=hexdigits') }} + run_once: true + +- name: 'Upload certificates if control-plane node need to be joined' + command: >- + kubeadm init 
phase upload-certs + --upload-certs + --certificate-key {{ cert_encryption_key }} + no_log: '{{ not sensitive_debug|bool }}' + run_once: true + delegate_to: '{{ cp_node }}' + when: _cp_to_join|length > 0 diff --git a/tests/conftest.py b/tests/conftest.py index 61e6d25..1275190 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -95,8 +95,8 @@ def vagrant(tmpdir): return LocalVagrant(inventory_dir_copy=tmpdir) -@then("Set cluster {variable}={value}") -@given("The cluster {variable)={value}") +@then(parsers.parse("Set cluster {variable} = {value}")) +@given(parsers.parse("The cluster {variable} = {value}")) def cluster_set_param(provider, variable, value): provider.vars[variable] = value # Refresh infrastructure @@ -160,7 +160,7 @@ def ansible_extra_args(request): @when( parsers.re( - r"I (?Pdry-)?run the playbooks?:?\s+(?P.+?)(?P\s+with error:?\s+)?(?(with_err)(?P.+)|\Z)", + r"I (?Pdry-)?run the playbooks?:?\s+(?P.+?)(?P\s+with error:?\s+)?(?(with_err)(?P.+)|\Z)", re.DOTALL, ) ) @@ -171,7 +171,7 @@ def ansible_playbook( galaxy_deps, ansible_extra_args, results, - playbooks, + arguments, dry_run, error, ): @@ -179,18 +179,12 @@ def ansible_playbook( dry_run = True else: dry_run = False - playbook_list = re.findall(r"[\w./]+", playbooks) - if not all(os.path.exists(p) for p in playbook_list): - playbook_list_subdir = [os.path.join("playbooks", p) for p in playbook_list] - if all(os.path.exists(p) for p in playbook_list_subdir): - playbook_list = playbook_list_subdir - else: - raise ValueError("All playbooks could not be found") + argument_list = re.findall(r"[^\s]+", arguments) result = run_ansible_playbook( virtualenv, - playbook_list, - ansible_extra_args=ansible_extra_args, inventory=inventory, + arguments=argument_list, + ansible_extra_args=ansible_extra_args, dry_run=dry_run, ) if error: @@ -206,9 +200,9 @@ def ansible_playbook( def ansible_kubeadm(inventory, virtualenv, galaxy_deps, ansible_extra_args, results): result = run_ansible_playbook( virtualenv, + 
inventory, ["tests/playbooks/verify.yml"], ansible_extra_args=ansible_extra_args, - inventory=inventory, ) assert_ansible_error(result) diff --git a/tests/features/haproxy.feature b/tests/features/haproxy.feature index 8b36f27..57acbcb 100644 --- a/tests/features/haproxy.feature +++ b/tests/features/haproxy.feature @@ -18,8 +18,8 @@ Feature: Haproxy apiserver_proxy_use_docker: true kube_version: 1.23 When I run the playbook tests/playbooks/prepare.yml - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml When I run the playbook tests/playbooks/cni.yml Then I should have a working cluster @@ -27,15 +27,15 @@ Feature: Haproxy When With those group_vars on group all: apiserver_proxy_use_docker: When I reset tasks counters - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml with error: As docker has been deprecated When With those group_vars on group all: apiserver_proxy_use_docker: false When I reset tasks counters - When I dry-run the playbooks 00_apiserver_proxy.yml - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I dry-run the playbooks playbooks/00_apiserver_proxy.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml Then I should have a working cluster diff --git a/tests/features/install.feature b/tests/features/install.feature index 99a499a..01990c6 100644 --- a/tests/features/install.feature +++ b/tests/features/install.feature @@ -17,16 +17,16 @@ Feature: Install cgroupDriver: "systemd" kube_version: When I run the playbook tests/playbooks/prepare.yml - When I dry-run the playbooks 00_apiserver_proxy.yml - 01_site.yml - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I dry-run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml + When I run the playbooks 
playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml When I run the playbook tests/playbooks/cni.yml Then I should have a working cluster When I reset tasks counters - And I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + And I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml Then I should see no orange/yellow changed tasks Examples: diff --git a/tests/features/join_nodes.feature b/tests/features/join_nodes.feature new file mode 100644 index 0000000..d0acd16 --- /dev/null +++ b/tests/features/join_nodes.feature @@ -0,0 +1,36 @@ +Feature: Join Nodes + A test to join nodes to a kubeadm cluster + + Scenario: Join nodes via ansible-kubeadm + Given I want ansible 3 + Given The cluster control_plane_count = 1 + Given The cluster worker_count = 0 + Given Some running VMs + + When With those group_vars on group all: + cluster_config: + networking: + podSubnet: 10.95.0.0/16 + controllerManager: + extraArgs: + "allocate-node-cidrs": "true" + kubelet_config: + cgroupDriver: "systemd" + kube_version: 1.23 + When I run the playbook tests/playbooks/prepare.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml + When I run the playbook tests/playbooks/cni.yml + Then I should have a working cluster + + Then Set cluster worker_count = 1 + When I run the playbook tests/playbooks/prepare.yml + When I run the playbooks playbooks/01_site.yml -e "limit=*-node-1" + Then I should have a working cluster + + Then Set cluster control_plane_count = 2 + When I run the playbook tests/playbooks/prepare.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml + + Then I should have a working cluster diff --git a/tests/features/upgrade.feature b/tests/features/upgrade.feature index 111cd01..4420435 100644 --- a/tests/features/upgrade.feature +++ b/tests/features/upgrade.feature @@ -18,13 +18,13 @@ Feature: Upgrade kube_version: action_reasons_review_skip: true When I run the playbook 
tests/playbooks/prepare.yml - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml When I run the playbook tests/playbooks/cni.yml When With those group_vars on group all: kube_version: - When I run the playbooks 00_apiserver_proxy.yml - 01_site.yml + When I run the playbooks playbooks/00_apiserver_proxy.yml + playbooks/01_site.yml Then I should have a working cluster diff --git a/tests/helpers/ansible.py b/tests/helpers/ansible.py index 8a4b8b0..7c5c935 100644 --- a/tests/helpers/ansible.py +++ b/tests/helpers/ansible.py @@ -3,6 +3,7 @@ import re import ansible_runner +import ansible_runner.interface def install_ansible(virtualenv, version=None): @@ -23,20 +24,15 @@ def install_galaxy_deps(virtualenv): "install", "-r", os.path.join(test_dir, "ansible.requirements.yml"), - "-p", - os.path.join(test_dir, "playbooks/roles"), ] ) def run_ansible_playbook( - virtualenv, playbooks, dry_run=False, ansible_extra_args=None, **kwargs + virtualenv, inventory, arguments, dry_run=False, ansible_extra_args=None, **kwargs ): - if isinstance(playbooks, str): - playbooks = [playbooks] - playbooks = [ - os.path.join(os.path.dirname(__file__), "../..", pbk) for pbk in playbooks - ] + if isinstance(arguments, str): + arguments = [arguments] # ansible_runner has several "bugs": # - Don't accept multiple playbooks on the parameter "playbook" (which is supposed to accept list) # - If you pass custom binary it cannot say if ansible or ansible-playbook so doesn't inject playbook anymore @@ -44,15 +40,21 @@ def run_ansible_playbook( envvars = dict(os.environ) envvars.setdefault("ANSIBLE_HOST_KEY_CHECKING", "false") envvars.setdefault("ANSIBLE_FORCE_COLOR", "true") - cmdline = " ".join(itertools.chain(ansible_extra_args or [], playbooks)) + cmdline_args = [ + "-i", + inventory, + *itertools.chain(arguments, ansible_extra_args or []), + ] if dry_run: - cmdline += " -C" - return ansible_runner.run( - 
binary=os.path.join(virtualenv.virtualenv, "bin/ansible-playbook"), - cmdline=cmdline, + cmdline_args += ["-C"] + runner = ansible_runner.interface.init_command_config( + executable_cmd=os.path.join(virtualenv.virtualenv, "bin/ansible-playbook"), + cmdline_args=cmdline_args, envvars=envvars, **kwargs ) + runner.run() + return runner def assert_ansible_error(run): diff --git a/tests/test_basic.py b/tests/test_basic.py index 1f18d33..64e8d5b 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -17,3 +17,8 @@ def test_upgrade(operating_system): @scenario("features/haproxy.feature", "Test upgrade to haproxy pkg") def test_haproxy(operating_system): pass + + +@scenario("features/join_nodes.feature", "Join nodes via ansible-kubeadm") +def test_join_nodes(operating_system): + pass