Skip to content

Commit

Permalink
Configure Manila with an NFS network
Browse files Browse the repository at this point in the history
Manila Tempest tests need to connect to the NFS share
for Ganesha tests, and they use a special (openstack)
network for that [1].

This patch adds an NFS network with VLAN 24 and range
172.21.0.0/24 in reproducers networking-definition.yml.
It also adds a multus range for this network so that the
Tempest pod can access this network for testing. The NFS
network is added to the OCP nodes for the same reason.
The podified-multinode-hci-deployment-crc job is updated
to not deploy manila since it was never tested by tempest.

This patch updates playbook manila_create_default_resources.yml
so that when CI for manila runs, a provider network is created.
Variables manila_provider_network_{name,vlan,start,end,range}
default to the storage network, but can be overridden to the
NFS network within a CI job definition.

[1] https://opendev.org/openstack/manila-tempest-plugin/src/branch/master/manila_tempest_tests/config.py#L99

Jira: https://issues.redhat.com/browse/OSPRH-7417

Signed-off-by: John Fulton <[email protected]>
  • Loading branch information
fultonj authored and fmount committed Oct 3, 2024
1 parent 82ccefe commit 34ed0e4
Show file tree
Hide file tree
Showing 7 changed files with 155 additions and 28 deletions.
24 changes: 24 additions & 0 deletions hooks/playbooks/manila_create_default_resources.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,31 @@
extra_specs:
snapshot_support: "True"
create_share_from_snapshot_support: "True"
manila_provider_network_name: storage
manila_provider_network_vlan: 21
manila_provider_network_start: 172.18.0.150
manila_provider_network_end: 172.18.0.200
manila_provider_network_range: 172.18.0.0/24
tasks:
- name: Create Manila provider network with Neutron for instance to access Manila
environment:
KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
PATH: "{{ cifmw_path }}"
ansible.builtin.command: |
oc -n {{ namespace }} exec -it pod/openstackclient -- {{ item }}
loop:
- "openstack network create {{ manila_provider_network_name }} --share --provider-network-type vlan --provider-physical-network datacentre --provider-segment {{ manila_provider_network_vlan }}"
- "openstack subnet create --allocation-pool start={{ manila_provider_network_start }},end={{ manila_provider_network_end }} --dhcp --network {{ manila_provider_network_name }} --subnet-range {{ manila_provider_network_range }} --gateway none {{ manila_provider_network_name }}-subnet"
register: _manila_provider_network_creation
failed_when: >-
( _manila_provider_network_creation.rc | int ) != 0
when:
- manila_provider_network_name | length > 0
- (manila_provider_network_vlan | string) | length > 0
- manila_provider_network_start | length > 0
- manila_provider_network_end | length > 0
- manila_provider_network_range | length > 0

- name: Create share type default for manila tempest plugin tests
environment:
KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
Expand Down
69 changes: 49 additions & 20 deletions playbooks/ceph.yml
Original file line number Diff line number Diff line change
Expand Up @@ -344,36 +344,67 @@
# public network always exist because is provided by the ceph_spec role
- name: Get Storage network range
ansible.builtin.set_fact:
cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"
cifmw_cephadm_storage_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"

- name: Set IP address of first monitor
ansible.builtin.set_fact:
cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}"
cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | first }}"
vars:
this_host: "{{ _target_hosts | first }}"

- name: Assert if any EDPM nodes n/w interface is missing in storage network
ansible.builtin.assert:
that:
- hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0
fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}"
- hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | length > 0
fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_storage_network }}"
loop: "{{ _target_hosts }}"

- name: Get already assigned IP addresses
ansible.builtin.set_fact:
ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}"
loop: "{{ _target_hosts }}"
- name: Set NFS Network Properties
when:
- cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool
block:
- name: Set NFS network range to storage network only if it was not provided
ansible.builtin.set_fact:
cifmw_cephadm_nfs_network: "{{ cifmw_cephadm_storage_network }}"
when:
- cifmw_cephadm_nfs_network is not defined or
cifmw_cephadm_nfs_network | length == 0

- name: Assert if any EDPM nodes n/w interface is missing in NFS network
ansible.builtin.assert:
that:
- hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | length > 0
fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_nfs_network }}"
loop: "{{ _target_hosts }}"
when:
- cifmw_cephadm_nfs_network != cifmw_cephadm_storage_network

# cifmw_cephadm_vip is the VIP reserved in the Storage network
- name: Set VIP var as empty string
ansible.builtin.set_fact:
cifmw_cephadm_vip: ""
- name: Get already assigned NFS IP addresses
ansible.builtin.set_fact:
ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | first ] }}"
loop: "{{ _target_hosts }}"

- name: Process VIP
ansible.builtin.include_role:
name: cifmw_cephadm
tasks_from: check_vip
loop: "{{ range(1, (ips | length) + 1) | list }}"
- name: Set VIP var as empty string
ansible.builtin.set_fact:
cifmw_cephadm_vip: ""
when:
- cifmw_cephadm_nfs_vip is undefined

- name: Get NFS VIP
ansible.builtin.include_role:
name: cifmw_cephadm
tasks_from: check_vip
loop: "{{ range(1, (ips | length) + 1) | list }}"
vars:
cifmw_cephadm_vip_network: "{{ cifmw_cephadm_nfs_network | default(storage_network_range, true) | default(ssh_network_range, true) }}"
when:
- cifmw_cephadm_nfs_vip is undefined

- name: Set NFS VIP
ansible.builtin.set_fact:
cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}"
when:
- cifmw_cephadm_nfs_vip is undefined

tasks:
- name: Satisfy Ceph prerequisites
Expand Down Expand Up @@ -409,6 +440,7 @@
vars:
# cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml
cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}"
cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"

- name: Configure Monitoring Stack
when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool
Expand All @@ -432,9 +464,6 @@
ansible.builtin.import_role:
name: cifmw_cephadm
tasks_from: cephnfs
vars:
# we reuse the same VIP reserved for rgw
cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}"

- name: Create Cephx Keys for OpenStack
ansible.builtin.import_role:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,15 @@ instances:
parent_interface: enp6s0
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.5
mac_addr: '52:54:00:0c:a1:d9'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
ocp-master-1:
hostname: ocp-master-1
name: ocp-master-1
Expand Down Expand Up @@ -124,6 +133,15 @@ instances:
parent_interface: enp6s0
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.6
mac_addr: '52:54:00:0c:a0:d9'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
ocp-master-2:
hostname: ocp-master-2
name: ocp-master-2
Expand Down Expand Up @@ -162,6 +180,15 @@ instances:
parent_interface: enp6s0
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.7
mac_addr: '52:54:00:0b:a1:d9'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
networks:
ctlplane:
dns_v4:
Expand Down Expand Up @@ -335,3 +362,35 @@ networks:
start_host: 100
ipv6_ranges: []
vlan_id: 22
nfs:
dns_v4: []
dns_v6: []
mtu: 1496
network_name: nfs
network_v4: 172.21.0.0/24
search_domain: nfs.example.com
tools:
metallb:
ipv4_ranges:
- end: 172.21.0.90
end_host: 90
length: 11
start: 172.21.0.80
start_host: 80
ipv6_ranges: []
multus:
ipv4_ranges:
- end: 172.21.0.70
end_host: 70
length: 41
start: 172.21.0.30
start_host: 30
ipv6_ranges: []
netconfig:
ipv4_ranges:
- end: 172.21.0.250
end_host: 250
length: 151
start: 172.21.0.100
start_host: 100
vlan_id: 24
10 changes: 4 additions & 6 deletions roles/cifmw_cephadm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,18 +77,16 @@ need to be changed for a typical EDPM deployment.
is gathered from the `cifmw_cephadm_bootstrap_conf` file, which represents
the initial Ceph configuration file passed at bootstrap time.

* `cifmw_cephadm_rgw_network`: the Ceph `public_network` where the `radosgw`
instances should be bound. The network range is gathered from the
`cifmw_cephadm_bootstrap_conf` file, which represents the initial Ceph
configuration file passed at bootstrap time.
* `cifmw_cephadm_nfs_network`: The network for NFS `ganesha`. If this
value is not passed, then the Ceph `public_network`, gathered from the
initial Ceph configuration file passed at bootstrap time, is used.

* `cifmw_cephadm_rgw_vip`: the ingress daemon deployed along with `radosgw`
requires a `VIP` that will be owned by `keepalived`. This IP address will
be used as entry point to reach the `radosgw backends` through `haproxy`.

* `cifmw_cephadm_nfs_vip`: the ingress daemon deployed along with the `nfs`
cluster requires a `VIP` that will be owned by `keepalived`. This IP
address is the same used for rgw unless an override is passed, and it's
cluster requires a `VIP` that will be owned by `keepalived`. This IP is
used as the entry point to reach the `ganesha backends` through an `haproxy`
instance where proxy-protocol is enabled.

Expand Down
2 changes: 1 addition & 1 deletion roles/cifmw_cephadm/tasks/check_vip.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

- name: Get an IP address from the Storage network
ansible.builtin.set_fact:
cur_ip: "{{ cifmw_cephadm_rgw_network | ansible.utils.next_nth_usable(count) }}"
cur_ip: "{{ cifmw_cephadm_vip_network | ansible.utils.next_nth_usable(count) }}"

- name: Reserve VIP if the address is available
ansible.builtin.set_fact:
Expand Down
1 change: 0 additions & 1 deletion roles/cifmw_cephadm/tasks/post.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@
vars:
cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}"


- name: Dashboard service validation
ansible.builtin.include_tasks: dashboard/validation.yml
when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool
18 changes: 18 additions & 0 deletions scenarios/reproducers/networking-definition.yml
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,20 @@ cifmw_networking_definition:
end: 250
vlan: 23
mtu: 1500
nfs:
network: "172.21.0.0/24"
tools:
netconfig:
ranges:
- start: 100
end: 250
multus:
ranges:
- start: 30
end: 70
vlan: 24
mtu: 1500


group-templates:
ocps:
Expand All @@ -106,6 +120,8 @@ cifmw_networking_definition:
trunk-parent: ctlplane
storage:
trunk-parent: ctlplane
nfs:
trunk-parent: ctlplane
ocp_workers:
network-template:
range:
Expand All @@ -127,6 +143,8 @@ cifmw_networking_definition:
trunk-parent: ctlplane
storagemgmt:
trunk-parent: ctlplane
nfs:
trunk-parent: ctlplane
cephs:
network-template:
range:
Expand Down

0 comments on commit 34ed0e4

Please sign in to comment.