From 1731073968f3def7845960bb8b4064ef0611bdd0 Mon Sep 17 00:00:00 2001 From: Dan Radez Date: Thu, 3 Oct 2024 09:23:51 -0400 Subject: [PATCH] generate inventory for hybrid deployments --- README.md | 1 + ansible/mno-deploy.yml | 4 ++ .../roles/create-ai-cluster/tasks/main.yml | 7 +++ .../templates/mac_interface_map.json.j2 | 2 + .../templates/inventory-mno.j2 | 14 +++++- .../mno-post-cluster-install/tasks/main.yml | 2 +- .../wait-hosts-discovered/tasks/main.yml | 2 +- .../tasks/set_hostname_role.yml | 49 ++++++++++++------- 8 files changed, 58 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index e1a89794..836b07e5 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,7 @@ Make sure to set/review the following vars: | `lab_cloud` | the cloud within the lab environment for Red Hat Performance labs (Example: `cloud42`) | `cluster_type` | either `mno`, or `sno` for the respective cluster layout | `worker_node_count` | applies to mno cluster type for the desired worker count, ideal for leaving left over inventory hosts for other purposes +| `hybrid_worker_count` | applies to mno cluster type for the desired virtual worker count, HV nodes and VMs are required to be setup. 
| `bastion_lab_interface` | set to the bastion machine's lab accessible interface | `bastion_controlplane_interface` | set to the interface in which the bastion will be networked to the deployed ocp cluster | `controlplane_lab_interface` | applies to mno cluster type and should map to the nodes interface in which the lab provides dhcp to and also required for public routable vlan based sno deployment(to disable this interface) diff --git a/ansible/mno-deploy.yml b/ansible/mno-deploy.yml index c0a3cf0d..a7b30903 100644 --- a/ansible/mno-deploy.yml +++ b/ansible/mno-deploy.yml @@ -24,6 +24,10 @@ vars: inventory_group: worker index: "{{ worker_node_count }}" + - role: boot-iso + vars: + inventory_group: hv_vm + index: "{{ hybrid_worker_count }}" - wait-hosts-discovered - configure-local-storage - install-cluster diff --git a/ansible/roles/create-ai-cluster/tasks/main.yml b/ansible/roles/create-ai-cluster/tasks/main.yml index e13d5463..bdd1cfab 100644 --- a/ansible/roles/create-ai-cluster/tasks/main.yml +++ b/ansible/roles/create-ai-cluster/tasks/main.yml @@ -32,6 +32,13 @@ - cluster_type == "mno" loop: "{{ groups['worker'] }}" +- name: MNO / Hybrid (VM Workers) - Populate static network configuration with VM worker nodes + include_tasks: static_network_config.yml + when: + - cluster_type == "mno" + - hybrid_worker_count > 0 + loop: "{{ groups['hv_vm'][hybrid_worker_offset:hybrid_worker_offset+hybrid_worker_count] }}" + # - debug: # msg: "{{ static_network_config }}" diff --git a/ansible/roles/create-ai-cluster/templates/mac_interface_map.json.j2 b/ansible/roles/create-ai-cluster/templates/mac_interface_map.json.j2 index 15dacb3c..bd2fbd3f 100644 --- a/ansible/roles/create-ai-cluster/templates/mac_interface_map.json.j2 +++ b/ansible/roles/create-ai-cluster/templates/mac_interface_map.json.j2 @@ -2,9 +2,11 @@ { "mac_address": "{{ hostvars[item]['mac_address'] }}", "logical_nic_name": "{{ hostvars[item]['network_interface'] }}" + {% if 'lab_mac' in hostvars[item] %} }, { 
"mac_address": "{{ hostvars[item]['lab_mac'] }}", "logical_nic_name": "{{ hostvars[item]['lab_interface'] }}" + {% endif %} } ] diff --git a/ansible/roles/create-inventory/templates/inventory-mno.j2 b/ansible/roles/create-inventory/templates/inventory-mno.j2 index 1e14e099..6e6a6bd1 100644 --- a/ansible/roles/create-inventory/templates/inventory-mno.j2 +++ b/ansible/roles/create-inventory/templates/inventory-mno.j2 @@ -85,12 +85,12 @@ network_prefix={{ controlplane_network_prefix }} {% for hv in ocpinventory_hv_nodes %} {% set hv_loop = loop %} {% for vm in range(hw_vm_counts[lab][(hv.pm_addr.split('.')[0]).split('-')[-1]]['default']) %} -{{ hv_vm_prefix }}{{ '%05d' % ctr.vm }} ansible_host={{ hv.pm_addr | replace('mgmt-','') }} hv_ip={{ controlplane_network | ansible.utils.nthhost(hv_loop.index + ocpinventory_worker_nodes|length + mno_worker_node_offset + hv_ip_offset) }} ip={{ controlplane_network | ansible.utils.nthhost(hv_vm_ip_offset + ctr.vm - 1) }} cpus={{ hv_vm_cpu_count }} memory={{ hv_vm_memory_size }} disk_size={{ hv_vm_disk_size }} vnc_port={{ 5900 + loop.index }} mac_address={{ (90520730730496 + ctr.vm) | ansible.utils.hwaddr('linux') }} domain_uuid={{ ctr.vm | to_uuid }} disk_location=/var/lib/libvirt/images bw_avg={{ hv_vm_bandwidth_average }} bw_peak={{ hv_vm_bandwidth_peak }} bw_burst={{ hv_vm_bandwidth_burst }} +{{ hv_vm_prefix }}{{ '%05d' % ctr.vm }} ansible_host={{ hv.pm_addr | replace('mgmt-','') }} hv_ip={{ controlplane_network | ansible.utils.nthhost(hv_loop.index + ocpinventory_worker_nodes|length + mno_worker_node_offset + hv_ip_offset) }} ip={{ controlplane_network | ansible.utils.nthhost(hv_vm_ip_offset + ctr.vm - 1) }} cpus={{ hv_vm_cpu_count }} memory={{ hv_vm_memory_size }} disk_size={{ hv_vm_disk_size }} vnc_port={{ 5900 + loop.index }} mac_address={{ (90520730730496 + ctr.vm) | ansible.utils.hwaddr('linux') }} domain_uuid={{ ctr.vm | to_uuid }} disk_location=/var/lib/libvirt/images bw_avg={{ hv_vm_bandwidth_average }} bw_peak={{ 
hv_vm_bandwidth_peak }} bw_burst={{ hv_vm_bandwidth_burst }} vendor=Libvirt install_disk=/dev/sda {% set ctr.vm = ctr.vm + 1 %} {% endfor %} {% if hv.disk2_enable %} {% for vm in range(hw_vm_counts[lab][(hv.pm_addr.split('.')[0]).split('-')[-1]][hv.disk2_device]) %} -{{ hv_vm_prefix }}{{ '%05d' % ctr.vm }} ansible_host={{ hv.pm_addr | replace('mgmt-','') }} hv_ip={{ controlplane_network | ansible.utils.nthhost(hv_loop.index + ocpinventory_worker_nodes|length + mno_worker_node_offset + hv_ip_offset) }} ip={{ controlplane_network | ansible.utils.nthhost(hv_vm_ip_offset + ctr.vm - 1) }} cpus={{ hv_vm_cpu_count }} memory={{ hv_vm_memory_size }} disk_size={{ hv_vm_disk_size }} vnc_port={{ 5900 + loop.index + hw_vm_counts[lab][(hv.pm_addr.split('.')[0]).split('-')[-1]]['default'] }} mac_address={{ (90520730730496 + ctr.vm) | ansible.utils.hwaddr('linux') }} domain_uuid={{ ctr.vm | to_uuid }} disk_location={{ disk2_mount_path }}/libvirt/images bw_avg={{ hv_vm_bandwidth_average }} bw_peak={{ hv_vm_bandwidth_peak }} bw_burst={{ hv_vm_bandwidth_burst }} +{{ hv_vm_prefix }}{{ '%05d' % ctr.vm }} ansible_host={{ hv.pm_addr | replace('mgmt-','') }} hv_ip={{ controlplane_network | ansible.utils.nthhost(hv_loop.index + ocpinventory_worker_nodes|length + mno_worker_node_offset + hv_ip_offset) }} ip={{ controlplane_network | ansible.utils.nthhost(hv_vm_ip_offset + ctr.vm - 1) }} cpus={{ hv_vm_cpu_count }} memory={{ hv_vm_memory_size }} disk_size={{ hv_vm_disk_size }} vnc_port={{ 5900 + loop.index + hw_vm_counts[lab][(hv.pm_addr.split('.')[0]).split('-')[-1]]['default'] }} mac_address={{ (90520730730496 + ctr.vm) | ansible.utils.hwaddr('linux') }} domain_uuid={{ ctr.vm | to_uuid }} disk_location={{ disk2_mount_path }}/libvirt/images bw_avg={{ hv_vm_bandwidth_average }} bw_peak={{ hv_vm_bandwidth_peak }} bw_burst={{ hv_vm_bandwidth_burst }} vendor=Libvirt install_disk=/dev/sda {% set ctr.vm = ctr.vm + 1 %} {% endfor %} {% endif %} @@ -105,6 +105,16 @@ machine_network={{ 
controlplane_network }} network_prefix={{ controlplane_network_prefix }} gateway={{ controlplane_network_gateway }} bw_limit={{ hv_vm_bandwidth_limit }} + +boot_iso=discovery.iso +lab_interface={{ controlplane_lab_interface }} +network_interface={{ controlplane_network_interface }} +{% if controlplane_bastion_as_dns %} +dns1={{ bastion_controlplane_ip }} +{% else %} +dns1={{ labs[lab]['dns'][0] }} +dns2={{ labs[lab]['dns'][1] | default('') }} +{% endif %} {% else %} [hv] # Set `hv_inventory: true` to populate diff --git a/ansible/roles/mno-post-cluster-install/tasks/main.yml b/ansible/roles/mno-post-cluster-install/tasks/main.yml index eb20c133..228943a3 100644 --- a/ansible/roles/mno-post-cluster-install/tasks/main.yml +++ b/ansible/roles/mno-post-cluster-install/tasks/main.yml @@ -154,7 +154,7 @@ - name: Label the worker nodes shell: | KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc label no --overwrite {{ item }} localstorage=true prometheus=true - with_items: "{{ groups['worker'] }}" + with_items: "{{ groups['worker'] + groups['hv_vm'][hybrid_worker_offset:hybrid_worker_offset+hybrid_worker_count] }}" - name: Install local-storage operator shell: diff --git a/ansible/roles/wait-hosts-discovered/tasks/main.yml b/ansible/roles/wait-hosts-discovered/tasks/main.yml index 12530ddc..be4c092d 100644 --- a/ansible/roles/wait-hosts-discovered/tasks/main.yml +++ b/ansible/roles/wait-hosts-discovered/tasks/main.yml @@ -3,7 +3,7 @@ - name: MNO - Create list of nodes to be discovered set_fact: - inventory_nodes: "{{ groups['controlplane'] + groups['worker'] }}" + inventory_nodes: "{{ groups['controlplane'] + groups['worker'] + groups['hv_vm'][hybrid_worker_offset:hybrid_worker_offset+hybrid_worker_count] }}" when: cluster_type == "mno" - name: SNO - Create list of nodes to be discovered diff --git a/ansible/roles/wait-hosts-discovered/tasks/set_hostname_role.yml b/ansible/roles/wait-hosts-discovered/tasks/set_hostname_role.yml index 7c8bbb44..9c494de4 100644 --- 
a/ansible/roles/wait-hosts-discovered/tasks/set_hostname_role.yml +++ b/ansible/roles/wait-hosts-discovered/tasks/set_hostname_role.yml @@ -1,39 +1,50 @@ --- # Set hostname and role -- name: Set the host bmc address +- name: Lookup the discovered host's bmc address set_fact: host_bmc: "{{ (discovered_host.inventory | from_json).bmc_address }}" when: lab in cloud_labs -- name: Set the host ID +- name: Lookup the discovered host's mac address + set_fact: + host_mac: "{{ (discovered_host.inventory | from_json).interfaces.0.mac_address }}" + when: lab in rh_labs + +- name: Lookup the discovered host's ID set_fact: host_interfaces: "{{ (discovered_host.inventory | from_json).interfaces }}" host_id: "{{ discovered_host.id }}" -- name: Set the hostname and role via mac address - set_fact: - hostname: "{{ item.0 }}" - host_role: "{{ hostvars[item.0]['role'] }}" +- name: Lookup the discovered host's hostname and role via mac address + block: + - name: "Set the hostname" + set_fact: + hostname: "{{ item }}" + with_items: "{{ hostvars | json_query(json_qry) }}" + vars: + json_qry: "(*)[?lab_mac=='{{ host_mac }}' || mac_address=='{{ host_mac }}'].inventory_hostname" + - name: Set the role + set_fact: + host_role: "{{ hostvars[hostname]['role'] }}" when: - lab in rh_labs - - hostvars[item.0]['lab_mac'] == item.1.mac_address - loop: "{{ inventory_nodes | product(host_interfaces) | list }}" - loop_control: - label: "{{ hostvars[item.0]['bmc_address'] }}" -- name: Set the hostname and role via bmc address - set_fact: - hostname: "{{ item }}" - host_role: "{{ hostvars[item]['role'] }}" +- name: Lookup the discovered host's hostname and role via bmc address + block: + - name: "Set the hostname" + set_fact: + hostname: "{{ item }}" + with_items: "{{ hostvars | json_query(json_qry) }}" + vars: + json_qry: "(*)[?bmc_address=='{{ host_bmc }}'].inventory_hostname" + - name: Set the role + set_fact: + host_role: "{{ hostvars[hostname]['role'] }}" when: - lab in cloud_labs - - 
hostvars[item]['bmc_address'] == host_bmc - loop: "{{ inventory_nodes }}" - loop_control: - label: "{{ hostvars[item]['bmc_address'] }}" -- name: Set hostname and role for {{ hostname }} +- name: Set the discovered host's hostname and role in assisted installer uri: url: "http://{{ assisted_installer_host }}:{{ assisted_installer_port }}/api/assisted-install/v2/infra-envs/{{ ai_infraenv_id }}/hosts/{{ host_id }}" method: PATCH