diff --git a/config.yml b/config.yml index f446adfb4..12471152e 100644 --- a/config.yml +++ b/config.yml @@ -8,8 +8,8 @@ host-private-key: '' # VLAN ID, will invoke the tests depended on external networking if not set to -1. vlan-id: 1 -# Physical NIC for VLAN. Default is "harvester-mgmt" -vlan-nic: 'harvester-mgmt' +# Physical NIC for VLAN. Default is "mgmt" +vlan-nic: 'mgmt' ip-pool-subnet: '192.168.0.0/24' ip-pool-start: '' ip-pool-end: '' @@ -22,15 +22,12 @@ sleep-timeout: 3 # script location to manipulate node power cycle node-scripts-location: 'scripts/vagrant' +# Images and their sha512sum opensuse-image-url: https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.5/images/openSUSE-Leap-15.5.x86_64-NoCloud.qcow2 -# sha512sum for opensuse image-url opensuse-checksum: '' - ubuntu-image-url: https://cloud-images.ubuntu.com/releases/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img -# sha512sum for ubuntu image-url ubuntu-checksum: '' - -# URL to download all images +# Cache URL; each image-url above is converted to <image-cache-url>/<image-name> image-cache-url: '' # script location for terraform related test cases @@ -51,7 +48,7 @@ accessKeyId: '' secretAccessKey: '' bucketName: '' -# Backup Targer NFS +# Backup Target NFS nfs-endpoint: '' nfs-mount-dir: 'nfsshare' diff --git a/harvester_e2e_tests/fixtures/settings.py b/harvester_e2e_tests/fixtures/settings.py index e1d3a6c3c..982c9a13a 100644 --- a/harvester_e2e_tests/fixtures/settings.py +++ b/harvester_e2e_tests/fixtures/settings.py @@ -10,7 +10,8 @@ def setting_checker(api_client, wait_timeout, sleep_timeout): class SettingChecker: def __init__(self): self.settings = api_client.settings - self.network_annotation = 'k8s.v1.cni.cncf.io/network-status' + self.nets_annotation = 'k8s.v1.cni.cncf.io/networks' + self.net_status_annotation = 'k8s.v1.cni.cncf.io/network-status' def _storage_net_configured(self): code, data = self.settings.get('storage-network') @@ -44,14 +45,7 @@ def _lh_instance_mgrs_running(self): for imgr in
lh_instance_mgrs: if 'Running' != imgr['status']['phase']: - return False, (f"Pod {imgr['id']} is NOT Running", imgr) - - if not (self.network_annotation in imgr['metadata']['annotations']): - return False, (f"No annotation '{self.network_annotation}' on pod", imgr) - - networks = json.loads(imgr['metadata']['annotations'][self.network_annotation]) - if not networks: - return False, (f"Pod annotation '{self.network_annotation}' is empty", imgr) + return False, (f"Pod {imgr['id']} is not Running", imgr) return True, (None, lh_instance_mgrs) @@ -62,16 +56,38 @@ def wait_storage_net_enabled_on_longhorn(self, snet_cidr): return False, (code, data) for imgr in data: - networks = json.loads(imgr['metadata']['annotations'][self.network_annotation]) + annotations = imgr['metadata']['annotations'] + + for na in [self.nets_annotation, self.net_status_annotation]: + if na not in annotations: + return False, (f"Pod has no annotation {na}", imgr) + + # Check k8s.v1.cni.cncf.io/networks + try: + nets = json.loads(annotations[self.nets_annotation]) + snet = next(n for n in nets if 'lhnet1' == n.get('interface')) + except StopIteration: + msg = f"Annotation {self.nets_annotation} has no interface 'lhnet1'" + return False, (msg, imgr) + + # Check k8s.v1.cni.cncf.io/network-status try: - snet_network = next(n for n in networks if 'lhnet1' == n.get('interface')) + net_statuses = json.loads(annotations[self.net_status_annotation]) + snet_status = next(s for s in net_statuses if 'lhnet1' == s.get('interface')) except StopIteration: - return False, ("No dedicated interface interface 'lhnet1'", imgr) + msg = f"Annotation {self.net_status_annotation} has no interface 'lhnet1'" + return False, (msg, imgr) - snet_ips = snet_network.get('ips', ['::1']) + snet_ips = snet_status.get('ips', ['::1']) if not all(ip_address(sip) in ip_network(snet_cidr) for sip in snet_ips): return False, (f"Dedicated IPs {snet_ips} does NOT fits {snet_cidr}", imgr) + # Check network name identical in both 
annotations + if f"{snet.get('namespace')}/{snet.get('name')}" != snet_status.get('name'): + msg = "Network name is not identical between annotations {} and {}".format( + self.nets_annotation, self.net_status_annotation) + return False, (msg, imgr) + return True, (None, None) @wait_until(wait_timeout, sleep_timeout) @@ -81,12 +97,8 @@ def wait_storage_net_disabled_on_longhorn(self): return False, (code, data) for imgr in data: - networks = json.loads(imgr['metadata']['annotations'][self.network_annotation]) - try: - next(n for n in networks if 'lhnet1' == n.get('interface')) - return False, ("No dedicated interface 'lhnet1'", imgr) - except StopIteration: - continue + if self.nets_annotation in imgr['metadata']['annotations']: + return False, (f"Pod should not have annotation {self.nets_annotation}", imgr) return True, (None, None) diff --git a/harvester_e2e_tests/integrations/test_0_storage_network.py b/harvester_e2e_tests/integrations/test_0_storage_network.py index 3f9ae360e..44ab69b9e 100644 --- a/harvester_e2e_tests/integrations/test_0_storage_network.py +++ b/harvester_e2e_tests/integrations/test_0_storage_network.py @@ -2,7 +2,6 @@ from time import sleep from operator import add from functools import reduce -from ipaddress import ip_address, ip_network from datetime import datetime, timedelta import pytest @@ -129,58 +128,18 @@ def test_storage_network( f"API Status({code}): {data}" ) _ = api_client.networks.delete(unique_name) - cidr = route['cidr'] + vlan_cidr = route['cidr'] # Create storage-network - spec = api_client.settings.StorageNetworkSpec.enable_with(vlan_id, cluster_network, cidr) - code, data = api_client.settings.update('storage-network', spec) + enable_spec = api_client.settings.StorageNetworkSpec.enable_with( + vlan_id, cluster_network, vlan_cidr + ) + code, data = api_client.settings.update('storage-network', enable_spec) assert 200 == code, (code, data) - - # Verify Configuration is Completed - endtime = datetime.now() +
timedelta(seconds=wait_timeout) - while endtime > datetime.now(): - code, data = api_client.settings.get('storage-network') - conds = data.get('status', {}).get('conditions', []) - if conds and 'True' == conds[-1].get('status') and 'Completed' == conds[-1].get('reason'): - break - sleep(3) - else: - raise AssertionError( - "Storage network updated but not completed.\n" - f"API Status({code}): {data}" - ) - - # Verify Longhorn status - done, ip_range = [], ip_network(cidr) - endtime = datetime.now() + timedelta(seconds=wait_timeout) - while endtime > datetime.now(): - code, data = api_client.get_pods(namespace='longhorn-system') - lh_instance_mgrs = [d for d in data['data'] - if 'instance-manager' in d['id'] and d['id'] not in done] - retries = [] - for im in lh_instance_mgrs: - if 'Running' != im['status']['phase']: - retries.append(im) - continue - nets = json.loads(im['metadata']['annotations']['k8s.v1.cni.cncf.io/network-status']) - try: - dedicated = next(n for n in nets if 'lhnet1' == n.get('interface')) - except StopIteration: - retries.append(im) - continue - - if not all(ip_address(ip) in ip_range for ip in dedicated.get('ips', ['::1'])): - retries.append(im) - continue - - if not retries: - break - sleep(3) - else: - raise AssertionError( - f"{len(retries)} Longhorn's instance manager not be updated after {wait_timeout}s\n" - f"Not completed: {retries}" - ) + snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester() + assert snet_enabled, (code, data) + snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(vlan_cidr) + assert snet_enabled, (code, data) # teardown disable_spec = api_client.settings.StorageNetworkSpec.disable()