diff --git a/acktools/__init__.py b/acktools/__init__.py index c9eb655c..a2671fad 100644 --- a/acktools/__init__.py +++ b/acktools/__init__.py @@ -5,7 +5,7 @@ def make_local_call(call): """Function wrapper for making a simple call to shell""" - process = subprocess.Popen(call, stdout=subprocess.PIPE) + process = subprocess.Popen(call, stdout=subprocess.PIPE) # NOSONAR stdout, stderr = process.communicate() if process.returncode == 0: return str(stdout).strip() diff --git a/acktools/log.py b/acktools/log.py index 742603cc..10b8b9b8 100644 --- a/acktools/log.py +++ b/acktools/log.py @@ -48,8 +48,8 @@ def configure_log(name, path, to_stdout=True): fileh.setFormatter(formatter) log.addHandler(fileh) except IOError, e: - print "Error writing to file handler. Ignoring." - print str(e) + print("Error writing to file handler. Ignoring.") + print(str(e)) if to_stdout: try: @@ -57,8 +57,8 @@ def configure_log(name, path, to_stdout=True): sth.setLevel(logging.DEBUG) log.addHandler(sth) except IOError, e: - print "Error writing to stdout handler. Ignoring." - print str(e) + print("Error writing to stdout handler. Ignoring.") + print(str(e)) return log diff --git a/acktools/net/__init__.py b/acktools/net/__init__.py index 7713d344..b584b073 100644 --- a/acktools/net/__init__.py +++ b/acktools/net/__init__.py @@ -8,7 +8,7 @@ def generate_mac(): Care should be taken to ensure duplicates are not used. """ mac = [0x00, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] + random.randint(0x00, 0x7f), # NOSONAR + random.randint(0x00, 0xff), # NOSONAR + random.randint(0x00, 0xff)] # NOSONAR return ':'.join(map(lambda x: "%02x" % x, mac)) diff --git a/autocertkit/ack_cli.py b/autocertkit/ack_cli.py index 3cc90167..8a782ffe 100755 --- a/autocertkit/ack_cli.py +++ b/autocertkit/ack_cli.py @@ -63,8 +63,8 @@ def get_xapi_session(config): def parse_cmd_args(): - parser = OptionParser( - usage="%prog [options]", version="%prog @KIT_VERSION@") + parser = OptionParser( # NOSONAR + usage="%prog [options]", version="%prog @KIT_VERSION@") # NOSONAR parser.add_option("-d", "--debug", dest="debug", @@ -142,6 +142,12 @@ def parse_cmd_args(): for k, v in kvp_rec.iteritems(): config[k] = v + check_files(config) + + return config + + +def check_files(config): # Check if files exist file_opts = [("vpx_dlvm_file", "VPX DLVM file")] for opt, label in file_opts: @@ -155,8 +161,6 @@ def parse_cmd_args(): assert_file_exists(os.path.join( INSTALL_DIR, vf_driver_pkg), "VF driver rpm package") - return config - def kvp_string_to_rec(string): """Take an input string 'a=b,c=d,e=f' and return the record @@ -196,85 +200,93 @@ def parse_netconf_file(filename): rec = {} for section in cp.sections(): if section.startswith('eth'): - # Ethernet Interface - utils.log.debug("Ethernet Interface: '%s'" % section) - - # Network ID is a label of the physical network the adapter has been connected to - # and should be uniform across all adapters. 
- network_id = cp.get(section, 'network_id') - utils.log.debug("Network IDs: '%s'" % network_id) - try: - network_id = int(network_id) - except: - raise utils.InvalidArgument('Network IDs for %s' % section, network_id, - 'should be integer') - - # Parse VLAN IDs - vlan_ids = "" - if cp.has_option(section, 'vlan_ids'): - vlan_ids = cp.get(section, 'vlan_ids') - utils.log.debug("VLAN IDs: '%s'" % vlan_ids) - try: - vlan_ids = [int(id.strip()) for id in vlan_ids.split(',')] - except: - raise utils.InvalidArgument('VLAN IDs for %s' % section, vlan_ids, - 'should be integer with comma as delimiter if multiple') - # Ensure that the specified VLAN is valid - for vlan_id in vlan_ids: - if vlan_id > MAX_VLAN or vlan_id < MIN_VLAN: - raise utils.InvalidArgument('VLAN ID for %s' % section, vlan_id, '%d < x < %d' % - (MIN_VLAN, MAX_VLAN)) - - # VF driver info for SR-IOV test - vf_driver_name = "" - if cp.has_option(section, 'vf_driver_name'): - vf_driver_name = cp.get(section, 'vf_driver_name') - vf_driver_pkg = "" - if cp.has_option(section, 'vf_driver_pkg'): - vf_driver_pkg = cp.get(section, 'vf_driver_pkg') - utils.log.debug("VF Driver Name: '%s'" % vf_driver_name) - utils.log.debug("VF Driver Pkg: '%s'" % vf_driver_pkg) - - # User is able to specify maxinum VF number per PF to test - max_vf_num = "" - if cp.has_option(section, 'max_vf_num'): - max_vf_num = cp.get(section, 'max_vf_num') - if max_vf_num: - try: - max_vf_num = int(max_vf_num) - except: - raise utils.InvalidArgument('Maxinum VF number for %s' % section, max_vf_num, - 'should be integer') - if max_vf_num <= 1: - raise utils.InvalidArgument('Maxinum VF number for %s' % section, max_vf_num, - 'should be greater than 1') - max_vf_num = str(max_vf_num) - utils.log.debug( - "Maxinum VF number per PF to test: '%s'" % max_vf_num) - - rec[section] = {'network_id': network_id, 'vlan_ids': vlan_ids, - 'vf_driver_name': vf_driver_name, 'vf_driver_pkg': vf_driver_pkg, - 'max_vf_num': max_vf_num} + parse_section_iface(cp, rec, section) elif section == "static_management": rec[section] = parse_static_config(cp, section) elif section.startswith('static'): - # Definition of network properties (e.g. dhcp/static) - arr = section.split('_') - if len(arr) != 3: - raise utils.InvalidArgument('static addressing section', section, - 'should be in format of "static__"') - net = arr[1] - vlan = arr[2] - if not unicode(net.strip()).isdecimal() or not unicode(vlan.strip()).isdecimal(): - raise utils.InvalidArgument('static addressing section', section, - 'should be valid network and/or vlan to determine') - rec[section] = parse_static_config(cp, section) + parse_section_static_net(cp, rec, section) else: raise Exception("Error: Unknown section: '%s'" % section) return rec +def parse_section_iface(cp, rec, section): + # Ethernet Interface + utils.log.debug("Ethernet Interface: '%s'" % section) + + # Network ID is a label of the physical network the adapter has been connected to + # and should be uniform across all adapters. 
+ network_id = cp.get(section, 'network_id') + utils.log.debug("Network IDs: '%s'" % network_id) + try: + network_id = int(network_id) + except: + raise utils.InvalidArgument('Network IDs for %s' % section, network_id, + 'should be integer') + + # Parse VLAN IDs + vlan_ids = "" + if cp.has_option(section, 'vlan_ids'): + vlan_ids = cp.get(section, 'vlan_ids') + utils.log.debug("VLAN IDs: '%s'" % vlan_ids) + try: + vlan_ids = [int(id.strip()) for id in vlan_ids.split(',')] + except: + raise utils.InvalidArgument('VLAN IDs for %s' % section, vlan_ids, + 'should be integer with comma as delimiter if multiple') + # Ensure that the specified VLAN is valid + for vlan_id in vlan_ids: + if vlan_id > MAX_VLAN or vlan_id < MIN_VLAN: + raise utils.InvalidArgument('VLAN ID for %s' % section, vlan_id, '%d < x < %d' % + (MIN_VLAN, MAX_VLAN)) + + # VF driver info for SR-IOV test + vf_driver_name = "" + if cp.has_option(section, 'vf_driver_name'): + vf_driver_name = cp.get(section, 'vf_driver_name') + vf_driver_pkg = "" + if cp.has_option(section, 'vf_driver_pkg'): + vf_driver_pkg = cp.get(section, 'vf_driver_pkg') + utils.log.debug("VF Driver Name: '%s'" % vf_driver_name) + utils.log.debug("VF Driver Pkg: '%s'" % vf_driver_pkg) + + # User is able to specify maxinum VF number per PF to test + max_vf_num = "" + if cp.has_option(section, 'max_vf_num'): + max_vf_num = cp.get(section, 'max_vf_num') + if max_vf_num: + try: + max_vf_num = int(max_vf_num) + except: + raise utils.InvalidArgument('Maxinum VF number for %s' % section, max_vf_num, + 'should be integer') + if max_vf_num <= 1: + raise utils.InvalidArgument('Maxinum VF number for %s' % section, max_vf_num, + 'should be greater than 1') + max_vf_num = str(max_vf_num) + utils.log.debug( + "Maxinum VF number per PF to test: '%s'" % max_vf_num) + + rec[section] = {'network_id': network_id, 'vlan_ids': vlan_ids, + 'vf_driver_name': vf_driver_name, 'vf_driver_pkg': vf_driver_pkg, + 'max_vf_num': max_vf_num} + + +def parse_section_static_net(cp, rec, section): + # Definition of network properties (e.g. 
dhcp/static) + arr = section.split('_') + if len(arr) != 3: + raise utils.InvalidArgument('static addressing section', section, + 'should be in format of "static__"') + net = arr[1] + vlan = arr[2] + if not unicode(net.strip()).isdecimal() or not unicode(vlan.strip()).isdecimal(): + raise utils.InvalidArgument('static addressing section', section, + 'should be valid network and/or vlan to determine') + rec[section] = parse_static_config(cp, section) + + def assert_file_exists(file_name, label): """Check whether a file exists, if it doesn't, raise an exception""" if not os.path.isfile(file_name): @@ -287,12 +299,12 @@ def validate_param(value, possibles, arg_name): raise utils.InvalidArgument(arg_name, value, possibles) -def parse_static_config(configParser, section): +def parse_static_config(config_parser, section): """Parse a ini section specifying static networking config for droid VMs to use.""" utils.log.debug("Read section '%s'" % section) config = {} for option in ['ip_start', 'ip_end', 'netmask', 'gw']: - config[option] = configParser.get(section, option) + config[option] = config_parser.get(section, option) utils.log.debug("Get option %s = '%s'" % (option, config[option])) if not config[option]: raise utils.InvalidArgument( @@ -415,10 +427,7 @@ def generate_test_config(session, config, test_run_file): fh.close() -@utils.log_exceptions -def pre_flight_checks(session, config): - """Check for some of the common problems""" - +def pre_flight_check_host(session): # Check for a run in progress if check_for_process(): raise Exception( @@ -442,6 +451,12 @@ def pre_flight_checks(session, config): if not avail_storage: raise Exception("Error: host '%s' has no available storage.") + +@utils.log_exceptions +def pre_flight_checks(session, config): + """Check for some of the common problems""" + pre_flight_check_host(session) + # Check that we have at least two network adaptors, on the same network recs = config['netconf'] ifaces = {} diff --git a/autocertkit/cpu_tests.py b/autocertkit/cpu_tests.py index d3287c2c..4372ef72 100644 --- a/autocertkit/cpu_tests.py +++ b/autocertkit/cpu_tests.py @@ -38,18 +38,6 @@ class PerfTestClass(testbase.CPUTestClass): """A somewhat generic test class for CPU performance tests that could be expanded to include additional plugin-based tasks""" - # Deine the test timeout in seconds and the number of test VMs - timeout = 3600 - vm_count = 3 - - # SSH command variables - username = 'root' - password = DEFAULT_PASSWORD - - # Class variables - test = '' - cmd_str = '' - def _setup_vms(self, session): """Create vm_count VMs on the master host and return a list of VM ref objects""" @@ -61,66 +49,6 @@ def _setup_vms(self, session): self.vm_count, {net_ref: self.get_static_manager(net_ref)})[host_ref] - def _call_plugin(self, session, vm_ref_list, call): - """Generic plugin call modified for this test class""" - res = [] - for vm_ref in vm_ref_list: - res.append(call_ack_plugin(self.session, call, - {'vm_ref': vm_ref, - 'mip': get_context_vm_mip(vm_ref), - 'username': self.username, - 'password': self.password})) - return res - - def _create_test_threads(self, session, vm_ref_list): - """Spawns a new non-blocking test thread for each VM and - returns a reference object to these threads. 
Each thread is - a timeout function of function self.cmd_str which is run on - the master host by the XenAPI plugin""" - threads = [] - for vm_ref in vm_ref_list: - threads.append(create_test_thread(lambda vm=vm_ref: TimeoutFunction(ssh_command(get_context_vm_mip(vm), - self.username, - self.password, - self.cmd_str, - timeout=self.timeout)["stdout"], - self.timeout, '%s test timed out %d' % (self.test, self.timeout)))) - return threads - - def _run_test(self, session): - """Main run fuction. Sets up the VMs, deploys the test, - spawns the test threads, and tracks the threads until they - all complete""" - # setup vms - vm_ref_list = self._setup_vms(session) - - # Make certain the VMs are available - for vm_ref in vm_ref_list: - check_vm_ping_response(session, vm_ref, get_context_vm_mip(vm_ref)) - - # deploy test rpms - log.debug("Deploying test RPMs") - self._call_plugin(session, vm_ref_list, 'deploy_' + self.test) - - # create and start test threads, wait until complete - log.debug("About to run %s test..." % self.test) - threads = self._create_test_threads(session, vm_ref_list) - - # Wait for the threads to finish running or timeout - start = time.time() - while check_test_thread_status(threads): - time.sleep(1) - if should_timeout(start, self.timeout): - raise Exception("%s test timed out %s" % - (self.test, self.timeout)) - - # retrieve the logs - log.debug("%s test is complete, retrieving logs" % self.test) - res = self._call_plugin(session, vm_ref_list, - 'retrieve_' + self.test + '_logs') - - return {'info': 'Test ran successfully'} - def test_lmbench(self, session): """Perform the LMBench CPU benchmark""" self.test = 'lmbench' diff --git a/autocertkit/models.py b/autocertkit/models.py index 2baa2a0c..8ee65200 100644 --- a/autocertkit/models.py +++ b/autocertkit/models.py @@ -228,7 +228,7 @@ def __init__(self, parent, testclass_node): def get_caps(self): """ Return a list of caps supported by this device based on the tests that have passed/failed """ - return eval(self.config['caps']) + return eval(self.config['caps']) # NOSONAR def get_order(self): """Return the integer number specified by the test class to indicate @@ -376,15 +376,15 @@ def __init__(self, xml_device_node): self.udid = self.config['udid'] # Unique device id # We only care about child element nodes - childElems = [node for node in xml_device_node.childNodes - if node.nodeType == node.ELEMENT_NODE] + child_elems = [node for node in xml_device_node.childNodes + if node.nodeType == node.ELEMENT_NODE] # We expect there to be one child node 'certification_tests' - if len(childElems) != 1: + if len(child_elems) != 1: raise Exception( - "Error: unexpected XML format. Should only be one child node: %s" % childElems) + "Error: unexpected XML format. 
Should only be one child node: %s" % child_elems) - xml_cert_tests_node = childElems[0] + xml_cert_tests_node = child_elems[0] test_class_list = [] for test_node in get_child_elems(xml_cert_tests_node): @@ -410,11 +410,11 @@ def get_id(self): if self.tag == "CPU": return get_cpu_id(self.config['modelname']) if self.tag == "LS": - PCI_id = self.config['vendor'] + ":" + self.config["device"] - return PCI_id + pci_id = self.config['vendor'] + ":" + self.config["device"] + return pci_id if self.tag == "OP": - XS_id = "XenServer %s" % self.config['product_version'] - return XS_id + xs_id = "XenServer %s" % self.config['product_version'] + return xs_id except Exception, e: log.error("Exception occurred getting ID: '%s'" % str(e)) return "Unknown ID" @@ -434,11 +434,11 @@ def get_description(self): if self.tag == "CPU": return self.config['modelname'] if self.tag == "LS": - LS_info = "Storage device using the %s driver" % self.config[ + ls_info = "Storage device using the %s driver" % self.config[ 'driver'] if 'PCI_description' in self.config: - LS_info += "\n\t%s" % self.config['PCI_description'] - return LS_info + ls_info += "\n\t%s" % self.config['PCI_description'] + return ls_info if self.tag == "OP": build_id = "build %s" % self.config['build_number'] return build_id @@ -564,26 +564,17 @@ def print_report(self, stream): stream.write("%s: %s\n" % (k, reqval)) - if tests_passed: - stream.write("\nTests that passed:\n") - for test in tests_passed: - stream.write("%s\n" % test.name) - - if tests_failed_req: - stream.write("\nTests that failed:\n") - for test in tests_failed_req: - stream.write("%s\n" % test.name) - - if tests_failed_noreq: - stream.write("\nNone required tests that failed:\n") - for test in tests_failed_noreq: - stream.write("%s\n" % test.name) - - if tests_skipped_req or tests_skipped_noreq: - stream.write("\nTests that skipped:\n") - for test in tests_skipped_req: - stream.write("%s\n" % test.name) - for test in tests_skipped_noreq: + self.print_results(stream, tests_passed, "Tests that passed:") + self.print_results(stream, tests_failed_req, "Tests that failed:") + self.print_results(stream, tests_failed_noreq, + "None required tests that failed:") + self.print_results(stream, tests_skipped_req + + tests_skipped_noreq, "Tests that skipped:") + + def print_results(self, stream, res, header): + if res: + stream.write("\n" + header + "\n") + for test in res: stream.write("%s\n" % test.name) @@ -650,15 +641,7 @@ def get_next_test_class(self, tc_info=None): continue # Get the test class still to run tcs = device.get_test_classes_to_run() - for tc in tcs: - if tc_info and 'test_class' in tc_info and tc_info['test_class'] not in tc.get_name(): - continue - if tc_info and 'test_method' in tc_info and not tc.get_method_by_name(tc_info['test_method']): - continue - - # Append a tuple - (test_class, order) - # Order index will be used below for sorting. - tcs_to_run.append((tc, tc.get_order())) + self.get_next_test_classes(tcs_to_run, tcs, tc_info) if not tcs_to_run: if tc_info: @@ -675,6 +658,17 @@ def get_next_test_class(self, tc_info=None): # Return the test class at the top of the list return tcs_to_run.pop()[0] + def get_next_test_classes(self, tcs_to_run, tcs, tc_info): + for tc in tcs: + if tc_info and 'test_class' in tc_info and tc_info['test_class'] not in tc.get_name(): + continue + if tc_info and 'test_method' in tc_info and not tc.get_method_by_name(tc_info['test_method']): + continue + + # Append a tuple - (test_class, order) + # Order index will be used below for sorting. 
+ tcs_to_run.append((tc, tc.get_order())) + def get_next_test(self): """Get the next test class and method to run""" tcs_to_run = [] diff --git a/autocertkit/network_tests.py b/autocertkit/network_tests.py index 758fdf66..aa0cb8b8 100644 --- a/autocertkit/network_tests.py +++ b/autocertkit/network_tests.py @@ -63,30 +63,22 @@ def __init__(self, session, server_vm_ref, network_ref, static_manager, - username='root', - password=DEFAULT_PASSWORD, - config=None, - vm_info=None, - multicast_ip="", - max_retry_on_failure=3): + opt): self.session = session self.server = server_vm_ref self.client = client_vm_ref self.network = network_ref self.static_manager = static_manager - self.username = username - self.password = password - self.multicast_ip = multicast_ip - self.max_retry_on_failure = max_retry_on_failure + self.username = opt.get('username', 'root') + self.password = opt.get('password', DEFAULT_PASSWORD) + self.multicast_ip = opt.get('multicast_ip', '') + self.max_retry_on_failure = opt.get('max_retry_on_failure', 3) # Interface and IP etc on server/client to (t)est and (m)anagement - self.vm_info = vm_info + self.vm_info = opt.get('vm_info', None) - if not config: - self.config = self.default_config - else: - self.config = config + self.config = opt.get('config', self.default_config) # Store pool master in order to make plugin calls self.host = get_pool_master(self.session) @@ -184,7 +176,7 @@ def configure_routes(self): {'vm_ref': self.client, 'mip': self.vm_info[self.client]['ip_m'], 'dest_ip': self.multicast_ip, - 'mask': '240.0.0.0', + 'mask': '240.0.0.0', # NOSONAR 'device': self.vm_info[self.client]['iface_t'], 'src': self.vm_info[self.client]['ip_t']} ) @@ -193,7 +185,7 @@ def configure_routes(self): {'vm_ref': self.server, 'mip': self.vm_info[self.server]['ip_m'], 'dest_ip': self.multicast_ip, - 'mask': '240.0.0.0', + 'mask': '240.0.0.0', # NOSONAR 'device': self.vm_info[self.server]['iface_t'], 'src': self.vm_info[self.server]['ip_t']} ) @@ -591,8 +583,6 @@ def _setup_network(self, session): def _setup_vms(self, session, network_refs): """Util function for returning VMs to run IPerf test on, can be subclassed to run different configurations""" - log.debug("Setting up VM - VM cross host test") - # Setup default static manager with the available interfaces sms = {} for network_ref in network_refs: @@ -662,12 +652,11 @@ def _run_test(self, session, direction): log.debug("Server IPerf VM ref: %s" % server) log.debug("About to run iperf test...") - iperf_data = IperfTest(session, client, server, - self.network_for_test, - self.get_static_manager( - self.network_for_test), - config=self.IPERF_ARGS, - multicast_ip=self.MULTICAST_IP).run() + iperf_data = IperfTest(session, client, server, self.network_for_test, + self.get_static_manager(self.network_for_test), + {'config': self.IPERF_ARGS, + 'multicast_ip': self.MULTICAST_IP} + ).run() return {'info': 'Test ran successfully', 'data': iperf_data, @@ -878,8 +867,6 @@ def _setup_network(self, session): def _setup_vms(self, session, net_refs): """Util function for returning VMs to run large MTU ping test on""" - log.debug("Setting up VM - VM cross host test") - sms = {} for net_ref in net_refs: sms[net_ref] = self.get_static_manager(net_ref) @@ -898,20 +885,19 @@ def _run_test(self, session): # retrieve VM IPs vm1_dev, _, vm1_ip = get_context_vm_mif(vm1_ref) - log.debug("VM %s has IP %s (iface: %s)" % (vm1_ref, vm1_ip, vm1_dev)) + log_str = "VM %s has IP %s (iface: %s)" + log.debug(log_str % (vm1_ref, vm1_ip, vm1_dev)) vm2_dev, _, vm2_ip = 
get_context_vm_mif(vm2_ref) - log.debug("VM %s has IP %s (iface: %s)" % (vm2_ref, vm2_ip, vm2_dev)) + log.debug(log_str % (vm2_ref, vm2_ip, vm2_dev)) vm1_test_dev, vm1_test_mac, vm1_test_ip \ = get_context_test_ifs(vm1_ref)[0] - log.debug("VM %s has IP %s (iface: %s)" % - (vm1_ref, vm1_test_ip, vm1_test_dev)) + log.debug(log_str % (vm1_ref, vm1_test_ip, vm1_test_dev)) vm2_test_dev, vm2_test_mac, vm2_test_ip \ = get_context_test_ifs(vm2_ref)[0] - log.debug("VM %s has IP %s (iface: %s)" % - (vm2_ref, vm2_test_ip, vm2_test_dev)) + log.debug(log_str % (vm2_ref, vm2_test_ip, vm2_test_dev)) # Add explicit IP routes to ensure MTU traffic travels # across the correct interface. @@ -948,6 +934,7 @@ def _run_test(self, session): log.debug("Attempt normal ping first...") ping_result = ping(vm1_ip, vm2_test_ip, vm1_test_dev) + log.debug("Normal result: %s" % ping_result) log.debug("Moving onto large MTU ping...") log.debug("Ping Arguments: %s" % self.PING_ARGS) @@ -982,7 +969,7 @@ class MulticastTestClass(IperfTestClass): IPERF_ARGS = {'format': 'm', 'thread_count': '4'} - MULTICAST_IP = '226.94.1.1' + MULTICAST_IP = '226.94.1.1' # NOSONAR class GROOffloadTestClass(testbase.NetworkTestClass): @@ -1068,14 +1055,7 @@ def _run_test(self, session, direction): # choose 2 VM and perform IPerf test self.iperf_test(session, ret, vm_list[0], vm_list[1], direction) - # disable sriov - for i in vm_list: - destroy_vm(session, i) - log.debug("Disable VF begin") - # network_sriov may be synced to slave host, so here destroy all, rather than just sriov_net_ref - for i in session.xenapi.network_sriov.get_all(): - log.debug("Destory network_sriov: %s" % i) - session.xenapi.network_sriov.destroy(i) + self._disable_vf(session, vm_list) if self.control == "enabled": # need to reboot at first @@ -1093,6 +1073,15 @@ def _run_test(self, session, direction): return ret + def _disable_vf(self, session, vm_list): + for i in vm_list: + destroy_vm(session, i) + log.debug("Disable VF begin") + # network_sriov may be synced to slave host, so here destroy all, rather than just sriov_net_ref + for i in session.xenapi.network_sriov.get_all(): + log.debug("Destory network_sriov: %s" % i) + session.xenapi.network_sriov.destroy(i) + def _check_sriov_cap(self, session): device = self.config['device_config']['Kernel_name'] has_sriov = has_sriov_cap(session, device) @@ -1138,8 +1127,6 @@ def _enable_vf(self, session, tried=False): return (False, net_ref, net_sriov_ref) def _setup_vms(self, session, network_refs): - log.debug("Setting up VM - VM cross host test") - # Setup default static manager with the available interfaces sms = {} networks_slave, networks_master = network_refs[0], network_refs[1] @@ -1180,26 +1167,16 @@ def iperf_test(self, session, result, vm1_ref, vm2_ref, direction): log.debug("IPerf client VM ref: %s" % client) log.debug("About to run SR-IOV IPerf test...") - iperf_data = IperfTest(session, client, server, - None, None, - config=self.IPERF_ARGS).run() + iperf_data = IperfTest(session, client, server, None, None, + {'config': self.IPERF_ARGS}).run() self.set_data(result, iperf_data) def ops_test(self, session, vms): + """It's an interface with default, can be overwritten in child class""" pass -''' -# Remove the test at present because of feature limit in CA-285893 -class IntraHostSRIOVTestClass1(InterHostSRIOVTestClass): - """Iperf test between VF (in VM1 on master) and VIF (in VM2 on master)""" - - def deploy_droid_vms(self, session, vf_driver, network_refs, sms): - return 
deploy_two_droid_vms_for_sriov_intra_host_test_vf_to_vif(session, vf_driver, network_refs, sms) -''' - - class IntraHostSRIOVTestClass1(InterHostSRIOVTestClass): """Iperf test between VF (in VM1 on master) and VF (in VM2 on master)""" diff --git a/autocertkit/operations_tests.py b/autocertkit/operations_tests.py index b5821647..bfdee1ee 100644 --- a/autocertkit/operations_tests.py +++ b/autocertkit/operations_tests.py @@ -56,7 +56,8 @@ def test_vm_power_control(self, session): state a predefined number of times""" vm_ref_list = self._setup_vms(session) for i in range(3): - log.debug("Starting test run %d of %d" % (i + 1, range(3)[-1] + 1)) + log.debug("Starting test (power control) run %d of %d" % + (i + 1, 3)) # Make certain the VMs are available for vm_ref in vm_ref_list: @@ -70,10 +71,10 @@ def test_vm_power_control(self, session): particular issue with Python variable bindings within loops""" task_list = [(lambda x=vm_ref: session.xenapi.Async.VM.clean_shutdown(x)) for vm_ref in vm_ref_list] - res = run_xapi_async_tasks(session, task_list) + run_xapi_async_tasks(session, task_list) # Verify the VMs report a 'Halted' power state - log.debug("Verrifying VM power control operations") + log.debug("Verrifying VM power control operations for 'Halted'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Halted': raise Exception( @@ -89,10 +90,10 @@ def test_vm_power_control(self, session): False, False)) for vm_ref in vm_ref_list] - res = run_xapi_async_tasks(session, task_list) + run_xapi_async_tasks(session, task_list) # Verify the VMs report a 'Running' power state - log.debug("Verrifying VM power control operations") + log.debug("Verrifying VM power control operations for 'Running'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Running': raise Exception( @@ -100,8 +101,8 @@ def test_vm_power_control(self, session): log.debug("VM %s is running" % vm_ref) log.debug("Verrification complete: All VMs have booted") - log.debug("Test run %d of %d has completed successfully" % - (i + 1, range(3)[-1] + 1)) + log.debug("Test (power control) run %d of %d has completed successfully" % + (i + 1, 3)) wait_for_vms_ips(session, vm_ref_list) @@ -115,7 +116,7 @@ def test_vm_reboot(self, session): them a predefined number of times""" vm_ref_list = self._setup_vms(session) for i in range(3): - log.debug("Starting test run %d of %d" % (i + 1, range(3)[-1] + 1)) + log.debug("Starting test (reboot) run %d of %d" % (i + 1, 3)) # Make certain the VMs are available for vm_ref in vm_ref_list: @@ -126,18 +127,19 @@ def test_vm_reboot(self, session): log.debug("Rebooting VMs: %s" % vm_ref_list) task_list = [(lambda x=vm_ref: session.xenapi.Async.VM.clean_reboot(x)) for vm_ref in vm_ref_list] - res = run_xapi_async_tasks(session, task_list) + run_xapi_async_tasks(session, task_list) # Verify the VMs report a 'Running' power state - log.debug("Verrifying VM power control operations") + log.debug( + "Verrifying VM power control operations (reboot) for 'Running'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Running': raise Exception("ERROR: Unexpected power state") - log.debug("VM %s is running" % vm_ref) + log.debug("VM %s is running after rebooting" % vm_ref) log.debug("Verrification complete: All VMs have rebooted") - log.debug("Test run %d of %d has completed successfully" % - (i + 1, range(3)[-1] + 1)) + log.debug("Test (reboot) run %d of %d has completed successfully" % + (i + 1, 3)) wait_for_vms_ips(session, vm_ref_list) @@ 
-151,7 +153,7 @@ def test_vm_suspend(self, session): suspend/resume functionality through three test runs""" vm_ref_list = self._setup_vms(session) for i in range(3): - log.debug("Starting test run %d of %d" % (i + 1, range(3)[-1] + 1)) + log.debug("Starting test (suspend) run %d of %d" % (i + 1, 3)) # Make certain the VMs are available for vm_ref in vm_ref_list: @@ -163,13 +165,13 @@ def test_vm_suspend(self, session): task_list = [(lambda x=vm_ref: session.xenapi.Async.VM.suspend(x)) for vm_ref in vm_ref_list] start = time.time() - res = run_xapi_async_tasks(session, task_list, 1200) + run_xapi_async_tasks(session, task_list, 1200) suspend_time = time.time() - start log.debug( "Suspend operation returned complete in %s seconds" % suspend_time) # Verify the VMs report a 'Suspended' power state - log.debug("Verrifying VM power control operations") + log.debug("Verrifying VM power control operations for 'Suspended'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Suspended': raise Exception("ERROR: VM %s did not suspend" % vm_ref) @@ -184,18 +186,19 @@ def test_vm_suspend(self, session): False, False)) for vm_ref in vm_ref_list] - res = run_xapi_async_tasks(session, task_list) + run_xapi_async_tasks(session, task_list) # Verify the VMs report a 'Running' power state - log.debug("Verrifying VM power control operations") + log.debug( + "Verrifying VM power control operations (suspend) for 'Running'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Running': raise Exception("ERROR: VM %s did not resume" % vm_ref) - log.debug("VM %s is running" % vm_ref) + log.debug("VM %s is running after suspending" % vm_ref) log.debug("Verrification complete: All VMs have resumed") - log.debug("Test run %d of %d has completed successfully" % - (i + 1, range(3)[-1] + 1)) + log.debug("Test (suspend) run %d of %d has completed successfully" % + (i + 1, 3)) wait_for_vms_ips(session, vm_ref_list) @@ -209,7 +212,7 @@ def test_vm_relocation(self, session): the master host and the master host""" vm_ref_list = self._setup_vms(session) for i in range(3): - log.debug("Starting test run %d of %d" % (i + 1, range(3)[-1] + 1)) + log.debug("Starting test (relocation) run %d of %d" % (i + 1, 3)) # Make certain the VMs are available for vm_ref in vm_ref_list: @@ -223,19 +226,20 @@ def test_vm_relocation(self, session): host_ref, {'live': 'true'})) for vm_ref in vm_ref_list] - res = run_xapi_async_tasks(session, task_list) + run_xapi_async_tasks(session, task_list) # Verify the VMs report a 'Running' power state - log.debug("Verrifying VM power control operations") + log.debug( + "Verrifying VM power control operations (relocation) for 'Running'") for vm_ref in vm_ref_list: if session.xenapi.VM.get_power_state(vm_ref) != 'Running': raise Exception("ERROR: Unexpected power state") - log.debug("VM %s is running" % vm_ref) + log.debug("VM %s is running after relocating" % vm_ref) log.debug( "Verrification complete: All VMs have been relocated and are running") - log.debug("Test run %d of %d has completed successfully" % - (i + 1, range(3)[-1] + 1)) + log.debug("Test (relocation) run %d of %d has completed successfully" % + (i + 1, 3)) wait_for_vms_ips(session, vm_ref_list) diff --git a/autocertkit/ssh.py b/autocertkit/ssh.py index 6ed3ee9b..d434c7b7 100644 --- a/autocertkit/ssh.py +++ b/autocertkit/ssh.py @@ -55,14 +55,14 @@ def __init__(self, self.log = log self.debug = False self.trans = None - for tries in range(3): + for retry in range(3): self.trans = None try: 
self.connect(ip, username, password, timeout) except Exception, e: log.error(traceback.format_exc()) desc = str(e) - log.error("SSH exception %s" % (desc)) + log.error("SSH retry %d exception %s" % (retry, desc)) if string.find(desc, "Signature verification") > -1 or \ string.find(desc, "Error reading SSH protocol banner") > -1: @@ -89,7 +89,7 @@ def __init__(self, # If we get here we have successfully opened a connection return # Even after retry(s) we didn't get a connection - self.reply = "SSH connection failed" + self.reply = "SSH connection all tries failed" self.toreply = 1 self.close() @@ -156,16 +156,13 @@ def __del__(self): class SSHCommand(SSHSession): """An SSH session guarded for target lockups.""" - def __init__(self, - ip, - command, - log, - username="root", - timeout=300, - password=None, - nowarn=False, - nolog=False, - combineStderr=False): + def __init__(self, ip, command, username, password, opt): + log = opt['log'] + timeout = opt.get('timeout', 300) + nowarn = opt.get('nowarn', False) + nolog = opt.get('nolog', False) + combine_stderr = opt.get('combine_stderr', False) + self.log = log if not nolog: log.debug("ssh %s@%s %s" % (username, ip, command)) @@ -182,13 +179,13 @@ def __init__(self, try: self.client = self.open_session() self.client.settimeout(timeout) - self.client.set_combine_stderr(combineStderr) + self.client.set_combine_stderr(combine_stderr) self.client.exec_command(command) self.client.shutdown(1) self.hStdout = self.client.makefile() - self.hStderr = None if combineStderr else self.client.makefile_stderr() + self.hStderr = None if combine_stderr else self.client.makefile_stderr() except Exception, e: - self.reply = "SSH connection failed", + self.reply = "SSH command executed failed: %s" % str(e), self.toreply = 1 self.close() @@ -212,14 +209,17 @@ def read_file(self, inf, outf=None, label="stdout"): else: reply += output - if not self.nolog: - self.log.debug("%s: %s" % - (label, (output[:-1] if output and output[-1] == '\n' else output))) + self.log_output(label, output) return reply - def read(self, outFile=None, errFile=None): + def log_output(self, label, output): + if not self.nolog: + self.log.debug("%s: %s" % + (label, (output[:-1] if output and output[-1] == '\n' else output))) + + def read(self, out_file=None, err_file=None): """Process the output and result of the command. - @:param outFile/errFile: Whether to write stdout/stderr to the file + @:param out_file/err_file: Whether to write stdout/stderr to the file None (Default) : just return stdout/stderr content Not None : write stdout/stderr content to the file, which is used for large content @:return dict including exit status, stdout and stderr @@ -231,8 +231,8 @@ def read(self, outFile=None, errFile=None): self.exit_status = self.client.recv_exit_status() if not self.nolog: self.log.debug("returncode: %d" % self.exit_status) - self.stdout = self.read_file(self.hStdout, outFile) - self.stderr = self.read_file(self.hStderr, errFile, label="stderr") \ + self.stdout = self.read_file(self.hStdout, out_file) + self.stderr = self.read_file(self.hStderr, err_file, label="stderr") \ if self.hStderr else "" # Local clean up. diff --git a/autocertkit/status.py b/autocertkit/status.py index 12d29e9e..4afeb522 100755 --- a/autocertkit/status.py +++ b/autocertkit/status.py @@ -77,7 +77,7 @@ def main(): # Check for manifest file if not os.path.exists(TEST_FILE): - print "4:Manifest file has not been created. Have run the kit? (Has an error occured?)" + print("4:Manifest file has not been created. 
Have run the kit? (Has an error occured?)") sys.exit(0) # Check for the python process @@ -88,24 +88,24 @@ def main(): try: ack_run = models.parse_xml(TEST_FILE) except: - print "5:An error has occured reading. %s" % TEST_FILE + print("5:An error has occured reading. %s" % TEST_FILE) sys.exit(1) p, f, s, w, r = ack_run.get_status() if w+r == 0: - print "0:Finished (Passed:%d, Failed:%d, Skipped:%d)" % (p, f, s) + print("0:Finished (Passed:%d, Failed:%d, Skipped:%d)" % (p, f, s)) elif not running and uptime_seconds <= 600 and r > 0: - print "3:Server rebooting... (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % ( - p, f, s, w, r) + print("3:Server rebooting... (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % ( + p, f, s, w, r)) elif not running and uptime_seconds > 600: - print "1:Process not running. An error has occurred. (Passed:%d, Failed:%d, Skipped: %d, Waiting:%d, Running:%d)" % ( - p, f, s, w, r) + print("1:Process not running. An error has occurred. (Passed:%d, Failed:%d, Skipped: %d, Waiting:%d, Running:%d)" % ( + p, f, s, w, r)) sys.exit(1) else: perc = float(p + f + s) / float(w + r + p + f + s) * 100 - print "2:Running - %d%% Complete (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % ( - perc, p, f, s, w, r) + print("2:Running - %d%% Complete (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % ( + perc, p, f, s, w, r)) if __name__ == "__main__": diff --git a/autocertkit/storage_tests.py b/autocertkit/storage_tests.py index 672c4ff1..b231a247 100644 --- a/autocertkit/storage_tests.py +++ b/autocertkit/storage_tests.py @@ -39,18 +39,6 @@ class PerfTestClass(testbase.LocalStorageTestClass): performance tests that could be expanded to include additional plugin-based tasks""" - # Deine the test timeout in seconds and the number of test VMs - timeout = 3600 - vm_count = 3 - - # SSH command variables - username = 'root' - password = DEFAULT_PASSWORD - - # Class variables - test = '' - cmd_str = '' - def _setup_vms(self, session): """Creates vm_count VMs on the master host's local SR""" @@ -71,60 +59,6 @@ def _setup_vms(self, session): net_ref)}, sr_ref)[host_ref] - def _call_plugin(self, session, vm_ref_list, call): - """Util function to call ACK plugin method""" - res = [] - for vm_ref in vm_ref_list: - res.append(call_ack_plugin(self.session, call, - {'vm_ref': vm_ref, - 'mip': get_context_vm_mip(vm_ref), - 'username': self.username, - 'password': self.password})) - return res - - def _create_test_threads(self, session, vm_ref_list): - """Spawns a new test thread using the cmd_strin a - timeout function over SSH to every VM in vm_ref_list""" - threads = [] - for vm_ref in vm_ref_list: - threads.append(create_test_thread(lambda vm=vm_ref: TimeoutFunction(ssh_command(get_context_vm_mip(vm), - self.username, - self.password, - self.cmd_str), - self.timeout, '%s test timed out %d' % (self.test, self.timeout)))) - return threads - - def _run_test(self, session): - """Run test function""" - # setup vms - vm_ref_list = self._setup_vms(session) - - # Make certain the VMs are available - for vm_ref in vm_ref_list: - check_vm_ping_response(session, vm_ref, get_context_vm_mip(vm_ref)) - - # deploy test rpms - self._call_plugin(session, vm_ref_list, 'deploy_' + self.test) - - # create, start test threads, wait until complete - log.debug("About to run %s test..." 
% self.test) - threads = self._create_test_threads(session, vm_ref_list) - - # Wait for the threads to finish running or timeout - start = time.time() - while check_test_thread_status(threads): - time.sleep(1) - if should_timeout(start, self.timeout): - raise Exception("%s test timed out %s" % - (self.test, self.timeout)) - - # retrieve the logs - log.debug("%s test is complete, retrieving logs" % self.test) - res = self._call_plugin(session, vm_ref_list, - 'retrieve_' + self.test + '_logs') - - return {'info': 'Test ran successfully'} - def test_iozone(self, session): """Perform the IOZone Local Storage benchmark""" self.test = 'iozone' diff --git a/autocertkit/test_generators.py b/autocertkit/test_generators.py index 860cc8d2..8c22b348 100644 --- a/autocertkit/test_generators.py +++ b/autocertkit/test_generators.py @@ -61,14 +61,12 @@ def __init__(self, session, config, interface=None): self.session = session self.config = config self.interface = interface - # if not self.TAG: - # raise Exception("The TestGenerator class is generic. Please inherit for a specific device") self.prereq_check() def prereq_check(self): """Function for ensuring that specific prereq conditions are checked and raised before execution.""" - return + pass def select_test_by_config(self, test_classes): """Select test classes to run by config""" @@ -130,7 +128,6 @@ def get_device_config(self): return {} devices = utils.get_master_network_devices(self.session) for device_rec in devices: - print device_rec if device_rec['Kernel_name'] == self.interface: return device_rec raise Exception("Specified interface %s appears not to exist on master" % @@ -157,16 +154,8 @@ def append_xml_config(self, doc, xml_node): test_classes = self.get_test_classes() for test_class_name, test_class in test_classes: - skipthis = False xcp_version = utils.get_xcp_version(self.session) - if test_class.REQUIRED_FOR: - if utils.eval_expr(test_class.REQUIRED_FOR, xcp_version): - if not utils.REQ_CAP in test_class.caps: - test_class.caps.append(utils.REQ_CAP) - else: - if utils.REQ_CAP in test_class.caps: - test_class.caps.remove(utils.REQ_CAP) - skipthis = True + skip_this = self.set_test_class_cap(test_class, xcp_version) class_node = doc.createElement('test_class') class_node.setAttribute('name', test_class_name) @@ -177,43 +166,58 @@ def append_xml_config(self, doc, xml_node): test_methods = test_class(self.session, self.config).list_tests() for method in test_methods: - method_node = doc.createElement('test_method') - method_node.setAttribute('name', str(method)) - - # Result/Info fields - result_node = doc.createElement('result') - info_node = doc.createElement('info') - if skipthis: - result_node.appendChild(doc.createTextNode('skip')) - reason_node = doc.createElement('reason') - reason_node.appendChild(doc.createTextNode('%s is not required for XCP %s.' 
- % (test_class_name, xcp_version))) - method_node.appendChild(reason_node) - else: - result_node.appendChild(doc.createTextNode('NULL')) - - method_node.appendChild(result_node) - method_node.appendChild(info_node) - testname_node = doc.createElement('test_name') - testname_node.appendChild(doc.createTextNode('%s.%s' % - (test_class_name.split('.')[1], str(method)))) - method_node.appendChild(testname_node) - - status_node = doc.createElement('status') - if skipthis: - status_node.appendChild(doc.createTextNode('done')) - else: - status_node.appendChild(doc.createTextNode('init')) - control_node = doc.createElement('control') - method_node.appendChild(status_node) - method_node.appendChild(control_node) - - class_node.appendChild(method_node) + self.add_method_node( + doc, skip_this, test_class_name, xcp_version, class_node, method) cts_node.appendChild(class_node) xml_node.appendChild(device_node) + def set_test_class_cap(self, test_class, xcp_version): + if test_class.REQUIRED_FOR: + if utils.eval_expr(test_class.REQUIRED_FOR, xcp_version): + if not utils.REQ_CAP in test_class.caps: + test_class.caps.append(utils.REQ_CAP) + else: + if utils.REQ_CAP in test_class.caps: + test_class.caps.remove(utils.REQ_CAP) + return True + return False + + def add_method_node(self, doc, skipthis, test_class_name, xcp_version, class_node, method): + method_node = doc.createElement('test_method') + method_node.setAttribute('name', str(method)) + + # Result/Info fields + result_node = doc.createElement('result') + info_node = doc.createElement('info') + if skipthis: + result_node.appendChild(doc.createTextNode('skip')) + reason_node = doc.createElement('reason') + reason_node.appendChild(doc.createTextNode('%s is not required for XCP %s.' + % (test_class_name, xcp_version))) + method_node.appendChild(reason_node) + else: + result_node.appendChild(doc.createTextNode('NULL')) + + method_node.appendChild(result_node) + method_node.appendChild(info_node) + testname_node = doc.createElement('test_name') + testname_node.appendChild(doc.createTextNode('%s.%s' % + (test_class_name.split('.')[1], str(method)))) + method_node.appendChild(testname_node) + + status_node = doc.createElement('status') + if skipthis: + status_node.appendChild(doc.createTextNode('done')) + else: + status_node.appendChild(doc.createTextNode('init')) + control_node = doc.createElement('control') + method_node.appendChild(status_node) + method_node.appendChild(control_node) + + class_node.appendChild(method_node) + class NetworkAdapterTestGenerator(TestGenerator): """TestGenerator class specific for NA tests""" @@ -403,32 +407,32 @@ class OperationsXMLGenerator(DeviceXMLGenerator): def print_documentation(object_name): - print "--------- %s ---------" % utils.bold(object_name) - print "" + print("--------- %s ---------" % utils.bold(object_name)) + print("") classes = enumerate_all_test_classes() for test_class_name, test_class in classes: arr = (object_name).split('.') if test_class_name == object_name: # get the class info - print "%s: %s" % (utils.bold('Prereqs'), - test_class.required_config) - print "%s: %s" % (utils.bold('Collects'), test_class.collects) - print "" - print utils.format(test_class.__doc__) - print "" - print "%s:" % (utils.bold('Tests')) + print("%s: %s" % (utils.bold('Prereqs'), + test_class.required_config)) + print("%s: %s" % (utils.bold('Collects'), test_class.collects)) + print("") + print(utils.format(test_class.__doc__)) + print("") + print("%s:" % (utils.bold('Tests'))) inst = test_class(None, {}) for method in 
inst.list_tests(): - print method - print "" + print(method) + print("") sys.exit(0) elif len(arr) == 3 and ".".join(arr[:2]) == test_class_name: # get the method info - print utils.format(getattr(test_class, arr[2]).__doc__) - print "" + print(utils.format(getattr(test_class, arr[2]).__doc__)) + print("") sys.exit(0) - print "The test name specified (%s) was incorrect. Please specify the full test name." % object_name + print("The test name specified (%s) was incorrect. Please specify the full test name." % object_name) sys.exit(0) @@ -439,10 +443,10 @@ def enumerate_all_test_classes(): def print_all_test_classes(): - print "---------- %s ---------" % utils.bold("Test List") + print("---------- %s ---------" % utils.bold("Test List")) classes = enumerate_all_test_classes() for test_class_name, test_class in classes: obj = test_class('nonexistent_session', {}) for test_name in obj.list_tests(): - print "%s.%s" % (test_class_name, test_name) + print("%s.%s" % (test_class_name, test_name)) sys.exit(0) diff --git a/autocertkit/test_runner.py b/autocertkit/test_runner.py index 0a0aa2eb..34b66966 100644 --- a/autocertkit/test_runner.py +++ b/autocertkit/test_runner.py @@ -134,6 +134,30 @@ def remove_child_nodes(parent_node): node.unlink() +def recurse_add_records_to_node(topnode, record): + for k, v in record.iteritems(): + node = dom.createElement(k) + topnode.appendChild(node) + + if type(v) == dict: + # Set attributes for element + for key, value in v.iteritems(): + log.debug("Value = %s Type=%s" % + (str(value), str(type(value)))) + if type(value) == dict: + subnode = dom.createElement(key) + node.appendChild(subnode) + recurse_add_records_to_node(subnode, value) + elif type(value) == str: + node.setAttribute(str(key), str(value)) + elif type(v) == str or type(v) == int: + node.appendChild(dom.createTextNode(v)) + else: + log.warning("Casting node value to string %s who's type is %s" % ( + str(v), str(type(v)))) + node.appendChild(dom.createTextNode(str(v))) + + def update_xml_with_result(dom, class_node, results): """Update an xml config file object with results returned by a class test""" log.debug("Result Record: %s" % results) @@ -148,29 +172,6 @@ def update_xml_with_result(dom, class_node, results): method_node.setAttribute('name', test_name) class_node.appendChild(method_node) - def recurse_add_records_to_node(topnode, record): - for k, v in record.iteritems(): - node = dom.createElement(k) - topnode.appendChild(node) - - if type(v) == dict: - # Set attributes for element - for key, value in v.iteritems(): - log.debug("Value = %s Type=%s" % - (str(value), str(type(value)))) - if type(value) == dict: - subnode = dom.createElement(key) - node.appendChild(subnode) - recurse_add_records_to_node(subnode, value) - elif type(value) == str: - node.setAttribute(str(key), str(value)) - elif type(v) == str or type(v) == int: - node.appendChild(dom.createTextNode(v)) - else: - log.warning("Casting node value to string %s who's type is %s" % ( - str(v), str(type(v)))) - node.appendChild(dom.createTextNode(str(v))) - recurse_add_records_to_node(method_node, result) @@ -289,7 +290,8 @@ def get_test_class(fqtn): if __name__ == "__main__": # Main function entry point - parser = OptionParser(usage="%prog [-c] [-t]", version="%prog 0.1") + parser = OptionParser( # NOSONAR + usage="%prog [-c] [-t]", version="%prog 0.1") # NOSONAR parser.add_option("-t", "--test file", dest="testfile", diff --git a/autocertkit/testbase.py b/autocertkit/testbase.py index c090b4c3..11a1d9e0 100644 --- 
a/autocertkit/testbase.py +++ b/autocertkit/testbase.py @@ -82,7 +82,7 @@ def host_setup(self): operations that require a reboot. The test runner will handle re-executing the current test case when booting has finished""" - return + pass def run(self, debug=False, test_name=None): """Method for running all the tests in a class""" @@ -111,106 +111,9 @@ def run(self, debug=False, test_name=None): signal.alarm(0) rec = {} - try: - log.debug("******** %s.%s ********" % ( - self.__class__.__name__, test)) - - init_context() - - res = getattr(self, test)(self.session) - """ - Critical key and value in res: - 'status': 'init' initial status before running - 'running' test still running - 'done', test finished - 'result': 'skip' needless to run - 'pass' test OK - 'fail' test failed (with Exception occurs) - 'control': any private data of test itself. - 'superior': return common info to test runner from test, - test runner will handle and take general action, then remove it, - so it won't be saved into xml file. - currently it's used for rebooting hosts only. - """ - - log.debug("test return: %s" % res) - - if 'superior' in res: - rec['status'] = 'running' - rec['result'] = 'NULL' - rec['superior'] = res['superior'] - else: - rec['status'] = 'done' - rec['result'] = 'pass' - - def copy_field(rec, res, field, keep_tag=True): - if field in res: - rec[field] = res[field] - elif keep_tag: - rec[field] = "" - - copy_field(rec, res, 'control', False) - copy_field(rec, res, 'info') - copy_field(rec, res, 'data') - copy_field(rec, res, 'config') - copy_field(rec, res, 'reason', False) - copy_field(rec, res, 'warning', False) - - except Exception, e: - traceb = traceback.format_exc() - rec['status'] = 'done' - rec['result'] = 'fail' - rec['traceback'] = traceb - rec['exception'] = str(e) - log.error("Test Case Failure: %s" % str(test)) - log.debug(traceb) - if debug: - log.debug( - "Running in debug mode - exiting due to failure: %s" % str(e)) - sys.exit(0) - except: - traceb = traceback.format_exc() - exception = True - rec['status'] = 'done' - rec['result'] = 'fail' - rec['trackeback'] = traceb - rec['exception'] = "Unexpected error: %s" % sys.exc_info()[0] - log.debug(traceb) - if debug: - log.debug( - "Running in debug mode - exiting due to failure: %s" % sys.exc_info()[0]) - sys.exit(0) - - # cleanup occurs only when current test really done - if rec['status'] == 'done': - try: - need_reboot = pool_wide_cleanup(self.session) - except: - traceb = traceback.format_exc() - log.debug(traceb) - if debug: - log.debug( - "Running in debug mode - exiting due to failure: %s" % sys.exc_info()[0]) - sys.exit(0) - - log.debug("The general cleanup is failed") - # reset test result - if rec['result'] == 'pass': - rec['result'] = 'fail' - rec['trackeback'] = traceb - rec['exception'] = "Unexpected error: %s" % \ - sys.exc_info()[0] - else: - # If test done normally then noneed reboot even if cleanup requires, that indicates - # test itself should handle reboot requirement as one test step - # If test is done by exception and cleanup requires reboot then ask runner to reboot - if rec['result'] == 'pass' and need_reboot: - log.debug( - "Warning: test should handle reboot requirement") - elif rec['result'] == 'fail' and need_reboot: - rec['superior'] = 'reboot' - log.debug( - "Ask for hosts reboot because current test did not finish normally") + self.run_test(test, debug, rec) + + self.cleanup_test(debug, rec) log.debug("Test case %s, %s: %s.%s" % (rec['result'], rec['status'], self.__class__.__name__, test)) @@ -219,6 
+122,110 @@ def copy_field(rec, res, field, keep_tag=True): return results + def copy_field(self, rec, res, field, keep_tag=True): + if field in res: + rec[field] = res[field] + elif keep_tag: + rec[field] = "" + + def run_test(self, test, debug, rec): + try: + log.debug("******** %s.%s ********" % ( + self.__class__.__name__, test)) + + init_context() + + res = getattr(self, test)(self.session) + """ + Critical key and value in res: + 'status': 'init' initial status before running + 'running' test still running + 'done', test finished + 'result': 'skip' needless to run + 'pass' test OK + 'fail' test failed (with Exception occurs) + 'control': any private data of test itself. + 'superior': return common info to test runner from test, + test runner will handle and take general action, then remove it, + so it won't be saved into xml file. + currently it's used for rebooting hosts only. + """ + + log.debug("test return: %s" % res) + + if 'superior' in res: + rec['status'] = 'running' + rec['result'] = 'NULL' + rec['superior'] = res['superior'] + else: + rec['status'] = 'done' + rec['result'] = 'pass' + + self.copy_field(rec, res, 'control', False) + self.copy_field(rec, res, 'info') + self.copy_field(rec, res, 'data') + self.copy_field(rec, res, 'config') + self.copy_field(rec, res, 'reason', False) + self.copy_field(rec, res, 'warning', False) + + except Exception, e: + traceb = traceback.format_exc() + rec['status'] = 'done' + rec['result'] = 'fail' + rec['traceback'] = traceb + rec['exception'] = str(e) + log.error("Test Case Failure: %s" % str(test)) + log.debug(traceb) + if debug: + log.debug( + "Running in debug mode - exiting due to Exception class: %s" % str(e)) + sys.exit(0) + except: + traceb = traceback.format_exc() + rec['status'] = 'done' + rec['result'] = 'fail' + rec['trackeback'] = traceb + rec['exception'] = "Unexpected error: %s" % sys.exc_info()[0] + log.debug(traceb) + if debug: + log.debug( + "Running in debug mode - exiting due to exception: %s" % sys.exc_info()[0]) + sys.exit(0) + + def cleanup_test(self, debug, rec): + # cleanup occurs only when current test really done + if rec['status'] != 'done': + return + + try: + need_reboot = pool_wide_cleanup(self.session) + except: + traceb = traceback.format_exc() + log.debug(traceb) + if debug: + log.debug( + "Running in debug mode - exiting due to exception when cleanup: %s" % sys.exc_info()[0]) + sys.exit(0) + + log.debug("The general cleanup is failed") + # reset test result + if rec['result'] == 'pass': + rec['result'] = 'fail' + rec['trackeback'] = traceb + rec['exception'] = "Unexpected error: %s" % \ + sys.exc_info()[0] + else: + # If test done normally then noneed reboot even if cleanup requires, that indicates + # test itself should handle reboot requirement as one test step + # If test is done by exception and cleanup requires reboot then ask runner to reboot + if rec['result'] == 'pass' and need_reboot: + log.debug( + "Warning: test should handle reboot requirement") + elif rec['result'] == 'fail' and need_reboot: + rec['superior'] = 'reboot' + log.debug( + "Ask for hosts reboot because current test did not finish normally") + # set result dict using below functions in TestClass def set_control(self, rec, value): rec['control'] = str(value) @@ -285,25 +292,8 @@ def get_required_config(self): the test cases with this class""" return self.required_config - def generate_static_net_conf(self): - log.debug("Config: %s" % self.config) - netconf = self.get_netconf() - log.debug("Netconf: %s" % netconf) - netid_rec = {} - 
for iface, rec in netconf.iteritems(): - if iface.startswith('eth'): - log.debug("iface: %s Rec: %s" % (iface, rec)) - nid = rec['network_id'] - - # Required for initialisation - if nid not in netid_rec: - netid_rec[nid] = [] - - # Append interface on that network id - netid_rec[nid].append(iface) - - res = {} - regex = re.compile(r'static_(?P\d+)_(?P\d+)') + def generate_static_net_conf_common(self, netid_rec, res): + regex = re.compile(r'static_(?P\d+)_(?P\d+)') # NOSONAR # Iterate through the network config structure to # see if we have any static managers to initialise. @@ -332,6 +322,26 @@ def generate_static_net_conf(self): res[key_name] = sm log.debug("Added static conf for '%s'" % key_name) + def generate_static_net_conf(self): + log.debug("Config: %s" % self.config) + netconf = self.get_netconf() + log.debug("Netconf: %s" % netconf) + netid_rec = {} + for iface, rec in netconf.iteritems(): + if iface.startswith('eth'): + log.debug("iface: %s Rec: %s" % (iface, rec)) + nid = rec['network_id'] + + # Required for initialisation + if nid not in netid_rec: + netid_rec[nid] = [] + + # Append interface on that network id + netid_rec[nid].append(iface) + + res = {} + self.generate_static_net_conf_common(netid_rec, res) + mgmt = get_pool_management_device(self.session) log.debug("The pool management device is %s" % mgmt) if 'static_management' in netconf: @@ -372,7 +382,7 @@ def get_static_manager(self, network_ref, vlan='0'): def get_vlans(self, iface): """ For a specified ethernet interface, return the list of VLANs that the user has declared to be in operation.""" - netconf = eval(self.config['netconf']) + netconf = eval(self.config['netconf']) # NOSONAR if iface not in netconf: raise Exception("The interface %s has not been defined in the network config file. (%s)" % (iface, netconf)) @@ -380,7 +390,7 @@ def get_vlans(self, iface): def get_netconf(self): """Return the network config dictionary, as provided by the user""" - return eval(self.config['netconf']) + return eval(self.config['netconf']) # NOSONAR def singlenicmode(self): return 'singlenic' in self.config.keys() and self.config['singlenic'] == 'true' @@ -475,8 +485,6 @@ def host_setup(self): call_ack_plugin(self.session, 'set_network_backend_pool', {'backend': 'bridge'}) host_reboot(self.session) - # Nothing to do, just return - return def get_bondable_ifaces(self, iface): """ Given a particular interface, return a list of other network @@ -513,12 +521,89 @@ def get_primary_bond_iface(self): return res -class LocalStorageTestClass(TestClass): +class PerformanceTest(TestClass): + # Deine the test timeout in seconds and the number of test VMs + timeout = 3600 + vm_count = 3 + + # SSH command variables + username = 'root' + password = DEFAULT_PASSWORD + + # Class variables + test = '' + cmd_str = '' + + def _call_plugin(self, session, vm_ref_list, call): + """Util function to call ACK plugin method""" + res = [] + for vm_ref in vm_ref_list: + res.append(call_ack_plugin(self.session, call, + {'vm_ref': vm_ref, + 'mip': get_context_vm_mip(vm_ref), + 'username': self.username, + 'password': self.password})) + return res + + def _create_test_threads(self, session, vm_ref_list): + """Spawns a new non-blocking test thread for each VM and + returns a reference object to these threads. 
+        a timeout function wrapping self.cmd_str which is run on
+        the master host by the XenAPI plugin"""
+        threads = []
+        for vm_ref in vm_ref_list:
+            threads.append(create_test_thread(lambda vm=vm_ref: TimeoutFunction(ssh_command(get_context_vm_mip(vm),
+                                                                                            self.username,
+                                                                                            self.password,
+                                                                                            self.cmd_str),
+                                                                                self.timeout, '%s test timed out %d' % (self.test, self.timeout))))
+        return threads
+
+    def _setup_vms(self, session):
+        """Interface for child classes to create VMs"""
+        return []
+
+    def _run_test(self, session):
+        """Main run function. Sets up the VMs, deploys the test,
+        spawns the test threads, and tracks the threads until they
+        all complete"""
+        # setup vms
+        vm_ref_list = self._setup_vms(session)
+
+        # Make certain the VMs are available
+        for vm_ref in vm_ref_list:
+            check_vm_ping_response(session, vm_ref, get_context_vm_mip(vm_ref))
+
+        # deploy test rpms
+        log.debug("Deploying test RPMs")
+        self._call_plugin(session, vm_ref_list, 'deploy_' + self.test)
+
+        # create and start test threads, wait until complete
+        log.debug("About to run %s test..." % self.test)
+        threads = self._create_test_threads(session, vm_ref_list)
+
+        # Wait for the threads to finish running or timeout
+        start = time.time()
+        while check_test_thread_status(threads):
+            time.sleep(1)
+            if should_timeout(start, self.timeout):
+                raise Exception("%s test timed out %s" %
+                                (self.test, self.timeout))
+
+        # retrieve the logs
+        log.debug("%s test is complete, retrieving logs" % self.test)
+        self._call_plugin(session, vm_ref_list,
+                          'retrieve_' + self.test + '_logs')
+
+        return {'info': 'Test ran successfully'}
+
+
+class LocalStorageTestClass(PerformanceTest):
     """Sub class for storage tests"""
     base_tag = 'LS'
 
 
-class CPUTestClass(TestClass):
+class CPUTestClass(PerformanceTest):
     """Sub class for CPU tests"""
     base_tag = 'CPU'
 
diff --git a/autocertkit/tests/utils_tests.py b/autocertkit/tests/utils_tests.py
index 313e0d4c..f51d9c71 100644
--- a/autocertkit/tests/utils_tests.py
+++ b/autocertkit/tests/utils_tests.py
@@ -194,29 +194,6 @@ def test_valid_equal_ping_responses(self):
         self.assertTrue(utils.valid_ping_response(response, max_loss=5))
 
 
-class RebootFlagTimestamps(unittest.TestCase):
-
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp()
-
-    def tearDown(self):
-        if os.path.exists(self.tmpdir):
-            shutil.rmtree(self.tmpdir)
-
-    def test_set_flag(self):
-        flag = "%s/test_set_flag" % self.tmpdir
-        utils.set_reboot_flag(flag_loc=flag)
-        self.assertTrue(os.path.exists(flag))
-
-    def test_read_flag(self):
-        flag = "%s/test_read_flag" % self.tmpdir
-        ts = datetime.now()
-        utils.set_reboot_flag(flag_loc=flag)
-        fts = utils.get_reboot_flag_timestamp(flag)
-        fmt_str = "%Y-%m-%d %H:%M:%S"
-        self.assertEqual(fts.strftime(fmt_str), ts.strftime(fmt_str))
-
-
 class HostLibMethodsTests(unittest.TestCase):
     """
     Host related functions unit tests.
diff --git a/autocertkit/utils.py b/autocertkit/utils.py
index bc88aea0..7d17cf5d 100644
--- a/autocertkit/utils.py
+++ b/autocertkit/utils.py
@@ -468,8 +468,6 @@ def wrapped_value_in_range(value, min_v, max_v, wrap=4 * G):
 
     return pre_range or post_range
 
-    return False
-
 
 class IperfTestStatsValidator(object):
 
@@ -589,49 +587,6 @@ def get_slave_control_domain(session):
     return _find_control_domain(session, slave_refs[0])
 
 
-def set_reboot_flag(tc_info=None, flag_loc=REBOOT_FLAG_FILE):
-    """Set an OS flag (i.e. touch a file) for when we're about to reboot.
- This is so that, on host reboot, we can work out whether we should - run, and what the status of the kit is""" - - ffile = open(flag_loc, 'w') - if tc_info: - ffile.write(str(tc_info)) - ffile.close() - - -def get_reboot_flag(flag=REBOOT_FLAG_FILE): - """Return a dictionary that contains information of when reboot was - initiated.""" - - if os.path.exists(flag): - ffile = open(flag, 'r') - flag_str = ffile.read().strip() - ffile.close() - - if len(flag_str) > 0: - tc_info = eval(flag_str) - if isinstance(tc_info, dict): - return tc_info - - return {'info': 'flag contains no previous running info.'} - else: - return None - - -def get_reboot_flag_timestamp(flag=REBOOT_FLAG_FILE): - """Finding when reboot was initialised.""" - if os.path.exists(flag): - time_str = time.ctime(os.path.getctime(flag)) - return datetime(*(time.strptime(time_str, "%a %b %d %H:%M:%S %Y")[0:6])) - return None - - -def clear_reboot_flag(flag=REBOOT_FLAG_FILE): - if os.path.exists(flag): - os.remove(flag) - - def reboot_all_hosts(session): master = get_pool_master(session) hosts = session.xenapi.host.get_all() @@ -650,7 +605,7 @@ def reboot_normally(session): # otherwise status.py will get wrong status then time.sleep(300) sys.exit(REBOOT_ERROR_CODE) - except Exception, e: + except: log.debug("ACK exit normally") @@ -755,20 +710,21 @@ def eval_expr(expr, val): test_val = ' '.join(arr[1:]) if condition == ">": - return val > test_val - if condition == "<": - return val < test_val - if condition == "=": - return val == test_val - if condition == "!=": - return val != test_val - if condition == ">=": - return val >= test_val - if condition == "<=": - return val <= test_val - - raise Exception("Specified condition is not yet supported for comparison: %s" % - condition) + res = val > test_val + elif condition == "<": + res = val < test_val + elif condition == "=": + res = val == test_val + elif condition == "!=": + res = val != test_val + elif condition == ">=": + res = val >= test_val + elif condition == "<=": + res = val <= test_val + else: + raise Exception("Specified condition is not yet supported for comparison: %s" % + condition) + return res def create_network(session, name_label, description, other_config): @@ -817,32 +773,32 @@ def get_pifs_by_device(session, device, hosts=[]): (device, hosts)) +def get_physical_pifs(session, pifs): + res = [] + for pif in pifs: + pif_rec = session.xenapi.PIF.get_record(pif) + if pif_rec['physical']: + res.append(pif) + elif pif_rec['bond_master_of']: + for bond in pif_rec['bond_master_of']: + bond_pifs = session.xenapi.Bond.get_slaves(bond) + res = res + get_physical_pifs(session, bond_pifs) + elif pif_rec['VLAN_master_of'] != 'OpaqueRef:NULL': + log.debug("VLAN PIF found: %s." % pif_rec) + vlan_obj = session.xenapi.VLAN.get_record( + pif_rec['VLAN_master_of']) + res = res + \ + get_physical_pifs(session, [vlan_obj['tagged_PIF']]) + else: + raise Exception( + "Error: %s is not physical, bond or VLAN" % pif_rec) + return res + + def get_physical_devices_by_network(session, network): """Taking a network, enumerate the list of physical devices attached to each component PIF. This may require some unwrapping (e.g. 
bonds) to determine all the consituent physical PIFs.""" - - def get_physical_pifs(session, pifs): - res = [] - for pif in pifs: - pif_rec = session.xenapi.PIF.get_record(pif) - if pif_rec['physical']: - res.append(pif) - elif pif_rec['bond_master_of']: - for bond in pif_rec['bond_master_of']: - bond_pifs = session.xenapi.Bond.get_slaves(bond) - res = res + get_physical_pifs(session, bond_pifs) - elif pif_rec['VLAN_master_of'] != 'OpaqueRef:NULL': - log.debug("VLAN PIF found: %s." % pif_rec) - vlan_obj = session.xenapi.VLAN.get_record( - pif_rec['VLAN_master_of']) - res = res + \ - get_physical_pifs(session, [vlan_obj['tagged_PIF']]) - else: - raise Exception( - "Error: %s is not physical, bond or VLAN" % pif_rec) - return res - pifs = session.xenapi.network.get_PIFs(network) physical_pifs = get_physical_pifs(session, pifs) @@ -1145,9 +1101,9 @@ def get_vm_vif_ifs(session, vm_ref): return ifs re_mac = re.compile( - r"""^%s/device/vif/(?P[0-9]+)/mac\s*=\s*"(?P.*)"$""" % dom_root) + r"""^%s/device/vif/(?P[0-9]+)/mac\s*=\s*"(?P.*)"$""" % dom_root) # NOSONAR re_ip = re.compile( - r"""^%s/attr/vif/(?P[0-9]+)/ipv4/(?P[0-9]+)\s*=\s*"(?P.*)"$""" % dom_root) + r"""^%s/attr/vif/(?P[0-9]+)/ipv4/(?P[0-9]+)\s*=\s*"(?P.*)"$""" % dom_root) # NOSONAR for line in res["stdout"].split('\n'): m = re_mac.match(line) if m: @@ -1162,7 +1118,6 @@ def get_vm_vif_ifs(session, vm_ref): if device not in ifs: ifs[device] = {"vif": device, "mac": "", "ip": ""} ifs[device]["ip"] = ip - continue return ifs @@ -1296,7 +1251,8 @@ def ping(vm_ip, dst_vm_ip, interface, packet_size=1400, def ping_with_retry(session, vm_ref, mip, dst_vm_ip, interface, timeout=20, retry=15): - loss_re = re.compile(""".* (?P[0-9]+)% packet loss, .*""", re.S) + loss_re = re.compile( + """.* (?P[0-9]+)% packet loss, .*""", re.S) # NOSONAR cmd_str = "ping -I %s -w %d %s" % (interface, timeout, dst_vm_ip) cmd = binascii.hexlify(cmd_str) @@ -1335,8 +1291,8 @@ def ssh_command(ip, username, password, cmd_str, dbg_str=None, attempts=10, time log.debug("Attempt %d/%d: %s" % (i, attempts, cmd_str)) try: - sshcmd = ssh.SSHCommand( - ip, cmd_str, log, username, timeout, password) + sshcmd = ssh.SSHCommand(ip, cmd_str, username, password, + {'log': log, 'timeout': timeout}) result = sshcmd.read() except Exception, e: log.debug("Exception: %s" % str(e)) @@ -1370,6 +1326,30 @@ def destroy_pif(session, pif): session.xenapi.PIF.destroy(pif) +def destroy_vm_vdi(session, vm_ref, timeout=60): + # Check that the VDI is not in-use + vbd_refs = session.xenapi.VM.get_VBDs(vm_ref) + for vbd_ref in vbd_refs: + vdi_ref = session.xenapi.VBD.get_VDI(vbd_ref) + log.debug("Destroying VDI %s" % vdi_ref) + try: + start = time.time() + ops_list = session.xenapi.VDI.get_allowed_operations(vdi_ref) + while 'destroy' not in ops_list: + time.sleep(2) + ops_list = session.xenapi.VDI.get_allowed_operations(vdi_ref) + if should_timeout(start, timeout): + raise Exception("Cannot destroy VDI: VDI is still active") + # If the VDI is free, try to destroy it. Should pass the exception + # catch if it is a NULL VDI reference. + session.xenapi.VDI.destroy(vdi_ref) + except XenAPI.Failure, exn: + if exn.details[0] == 'HANDLE_INVALID': + log.debug("Ignore XenAPI.Failure of HANDLE_INVALID") + else: + raise exn + + def destroy_vm(session, vm_ref, timeout=60): """Checks powerstate of a VM, destroys associated VDIs, and destroys VM once shutdown""" @@ -1409,27 +1389,8 @@ def destroy_vm(session, vm_ref, timeout=60): log.debug("VM %s is ready to be removed." 
% vm_ref) - # Check that the VDI is not in-use - vbd_refs = session.xenapi.VM.get_VBDs(vm_ref) - for vbd_ref in vbd_refs: - vdi_ref = session.xenapi.VBD.get_VDI(vbd_ref) - log.debug("Destroying VDI %s" % vdi_ref) - try: - start = time.time() - ops_list = session.xenapi.VDI.get_allowed_operations(vdi_ref) - while 'destroy' not in ops_list: - time.sleep(2) - ops_list = session.xenapi.VDI.get_allowed_operations(vdi_ref) - if should_timeout(start, timeout): - raise Exception("Cannot destroy VDI: VDI is still active") - # If the VDI is free, try to destroy it. Should pass the exception - # catch if it is a NULL VDI reference. - session.xenapi.VDI.destroy(vdi_ref) - except XenAPI.Failure, exn: - if exn.details[0] == 'HANDLE_INVALID': - pass - else: - raise exn + destroy_vm_vdi(session, vm_ref, timeout) + # Finally, destroy the VM log.debug("Destroying VM %s" % vm_ref) session.xenapi.VM.destroy(vm_ref) @@ -1458,7 +1419,7 @@ def host_cleanup(session, host): default_route_key = 'default_routes' default_route_list = [] if default_route_key in oc.keys(): - default_routes = eval(oc[default_route_key]) + default_routes = eval(oc[default_route_key]) # NOSONAR for rec in default_routes: route_obj = route.Route(**rec) default_route_list.append(route_obj) @@ -1488,6 +1449,26 @@ def pool_wide_host_cleanup(session): host_cleanup(session, host) +def pool_wide_vm_dom0_cleanup(session, tag, vm, oc): + # Cleanup any routes that are lying around + keys_to_clean = [] + for k, v in oc.iteritems(): + if k.startswith('route_clean_'): + # Call plugin + call_ack_plugin(session, 'remove_route', + { + 'vm_ref': vm, + 'dest_ip': v, + }) + keys_to_clean.append(k) + + if keys_to_clean: + for key in keys_to_clean: + del oc[key] + + session.xenapi.VM.set_other_config(vm, oc) + + def pool_wide_vm_cleanup(session, tag): """Searches for VMs with a cleanup tag, and destroys""" vms = session.xenapi.VM.get_all() @@ -1498,23 +1479,7 @@ def pool_wide_vm_cleanup(session, tag): continue if session.xenapi.VM.get_is_control_domain(vm): - # Cleanup any routes that are lying around - keys_to_clean = [] - for k, v in oc.iteritems(): - if k.startswith('route_clean_'): - # Call plugin - call_ack_plugin(session, 'remove_route', - { - 'vm_ref': vm, - 'dest_ip': v, - }) - keys_to_clean.append(k) - - if keys_to_clean: - for key in keys_to_clean: - del oc[key] - - session.xenapi.VM.set_other_config(vm, oc) + pool_wide_vm_dom0_cleanup(session, tag, vm, oc) def pool_wide_network_sriov_cleanup(session, tag): @@ -1532,6 +1497,18 @@ def pool_wide_network_sriov_cleanup(session, tag): return need_reboot +def pool_wide_network_host_pif_cleanup(session, tag): + for host in session.xenapi.host.get_all(): + for pif in session.xenapi.host.get_PIFs(host): + oc = session.xenapi.PIF.get_other_config(pif) + if oc.pop(tag, None): + log.debug("Pif to cleanup: %s from host %s" % (pif, host)) + call_ack_plugin(session, 'flush_local_device', + {'device': session.xenapi.PIF.get_device(pif)}, + host=host) + session.xenapi.PIF.set_other_config(pif, oc) + + def pool_wide_network_cleanup(session, tag): """Searches for networks with a cleanup tag, and destroys if found""" @@ -1549,15 +1526,7 @@ def pool_wide_network_cleanup(session, tag): session.xenapi.network.destroy(network) elif session.xenapi.network.get_MTU(network) != '1500': set_network_mtu(session, network, '1500') - for host in session.xenapi.host.get_all(): - for pif in session.xenapi.host.get_PIFs(host): - oc = session.xenapi.PIF.get_other_config(pif) - if oc.pop(tag, None): - log.debug("Pif to cleanup: %s from 
host %s" % (pif, host)) - call_ack_plugin(session, 'flush_local_device', - {'device': session.xenapi.PIF.get_device(pif)}, - host=host) - session.xenapi.PIF.set_other_config(pif, oc) + pool_wide_network_host_pif_cleanup(session, tag) def get_pool_management_device(session): @@ -1602,9 +1571,8 @@ def get_local_sr(session, host): for pbd_ref, pbd_rec in all_pbds.iteritems(): if host in pbd_rec['host']: for sr_ref, sr_rec in all_srs.iteritems(): - if 'Local storage' in sr_rec['name_label']: - if pbd_rec['SR'] in sr_ref: - return sr_ref + if 'Local storage' in sr_rec['name_label'] and pbd_rec['SR'] in sr_ref: + return sr_ref raise Exception("No local SR attached to the master host") @@ -1884,18 +1852,21 @@ def init_ifs_ip_addressing(session, vm_ref, vifs_info): device = "ethx%d" % id mac, ip, netmask, gw = vif_info[1], vif_info[2], vif_info[3], vif_info[4] if ip: - droid_add_static_ifcfg( - session, host_ref, vm_ref, mip, device, mac, ip, netmask, gw) + dev_info = {'iface': device, 'mac': mac, + 'ip': ip, 'netmask': netmask, 'gw': gw} + droid_add_static_ifcfg(session, host_ref, vm_ref, mip, dev_info) else: droid_add_dhcp_ifcfg(session, host_ref, vm_ref, mip, device, mac) -def droid_add_static_ifcfg(session, host, vm_ref, mip, iface, mac, ip, netmask, gw): +def droid_add_static_ifcfg(session, host, vm_ref, mip, dev_info): """Set VM interface static ip in config file ifcfg-eth*""" cmd = b'''echo "TYPE=Ethernet\nNAME=%s\nDEVICE=%s\nHWADDR=%s\n''' \ b'''IPADDR=%s\nNETMASK=%s\nGATEWAY=%s\nBOOTPROTO=none\nONBOOT=yes" ''' \ b'''> "%s/ifcfg-%s" ''' \ - % (iface, iface, mac, ip, netmask, gw, "/etc/sysconfig/network-scripts", iface) + % (dev_info['iface'], dev_info['iface'], dev_info['mac'], dev_info['ip'], + dev_info['netmask'], dev_info['gw'], "/etc/sysconfig/network-scripts", + dev_info['iface']) args = {'vm_ref': vm_ref, 'mip': mip, 'username': 'root', @@ -2222,7 +2193,7 @@ def process_values(item): return [data] if isinstance(data, dict) else data -def call_ack_plugin(session, method, args={}, host=None, noJsonHook=False): +def call_ack_plugin(session, method, args={}, host=None, no_json_hook=False): if not host: host = get_pool_master(session) log.debug("About to call plugin '%s' on host '%s' with args '%s'" % @@ -2234,7 +2205,9 @@ def call_ack_plugin(session, method, args={}, host=None, noJsonHook=False): args) log.debug("Plugin Output: %s" % ( "%s[...check plugin log for more]" % res[:1000] if res and len(res) > 1000 else res)) - return (json.loads(res) if noJsonHook else json_loads(res)) if res else None + if res: + return json.loads(res) if no_json_hook else json_loads(res) + return None def get_hw_offloads(session, device): @@ -2324,13 +2297,13 @@ def wait_for_dom0_device_ip(session, vm_ref, device, static_manager): def get_vm_interface(session, host, vm_ref, mip): """Use ip command to get all interface (eth*) information""" - # e.g. ifs["eth0"] = ["eth0", "ec:f4:bb:ce:91:9c", "10.62.114.80"] + # e.g. 
eth0: [eth0, ec:f4:bb:ce:91:9c, 10.62.114.80] ifs = {} # cmd output: "eth0: ec:f4:bb:ce:91:9c" cmd = b"""ip -o link | awk '{if($2 ~ /^eth/) print $2,$(NF-2)}'""" res = ssh_command(mip, 'root', DEFAULT_PASSWORD, cmd) - mac_re = re.compile(r"(?P.*): (?P.*)") + mac_re = re.compile(r"(?P.*): (?P.*)") # NOSONAR for line in res['stdout'].strip().split('\n'): match = mac_re.match(line) if match: @@ -2340,7 +2313,7 @@ def get_vm_interface(session, host, vm_ref, mip): # cmd output: "eth0 10.62.114.80/21" cmd = b"""ip -o -f inet addr | awk '{if($2 ~ /^eth/) print $2,$4}'""" res = ssh_command(mip, 'root', DEFAULT_PASSWORD, cmd) - ip_re = re.compile(r"(?P.*) (?P.*)") + ip_re = re.compile(r"(?P.*) (?P.*)") # NOSONAR for line in res['stdout'].strip().split('\n'): match = ip_re.match(line) if match: @@ -2412,17 +2385,14 @@ def check_test_thread_status(threads): def get_system_info_hwinfo(session): - return call_ack_plugin(session, 'get_system_info_hwinfo', noJsonHook=True) + return call_ack_plugin(session, 'get_system_info_hwinfo', no_json_hook=True) def get_system_info_tabular(session): return call_ack_plugin(session, 'get_system_info_tabular') -def get_master_network_devices(session): - nics = call_ack_plugin(session, 'get_network_devices') - log.debug("Network Devices found on machine(Plugin): '%s'" % nics) - +def remove_invalid_keys(nics): # remove invalid keys of nic which violates xml, referring to # https://stackoverflow.com/questions/19677315/xml-tagname-starting-with-number-is-not-working for n in nics: @@ -2431,6 +2401,13 @@ def get_master_network_devices(session): n.pop(k) log.debug("Remove invalid key %s from %s" % (k, n['PCI_name'])) + +def get_master_network_devices(session): + nics = call_ack_plugin(session, 'get_network_devices') + log.debug("Network Devices found on machine(Plugin): '%s'" % nics) + + remove_invalid_keys(nics) + hwinfo_devs = get_system_info_hwinfo(session) if hwinfo_devs: nics_hw = hwinfo_devs['nics'] @@ -2449,22 +2426,22 @@ def get_local_storage_info(session): return devices -def _convertToValidXmlElementName(str1): +def _convert_to_valid_xml_element_name(str1): if str1 and not str1[0].isalpha(): str1 = "_" + str1 str1 = str1.replace(":", "_") return str1 -def _convertDictKeysToValidXmlTags(d): - return {_convertToValidXmlElementName(k): d[k] for k in d} +def _convert_dict_keys_to_valid_xml_tags(d): + return {_convert_to_valid_xml_element_name(k): d[k] for k in d} def get_xs_info(session): """Returns a limited subset of info about the XenServer version""" master_ref = get_pool_master(session) info = session.xenapi.host.get_software_version(master_ref) - return _convertDictKeysToValidXmlTags(info) + return _convert_dict_keys_to_valid_xml_tags(info) def _get_type_and_value(entry): @@ -2478,6 +2455,13 @@ def _get_type_and_value(entry): return r +def copy_dict_items(src, dst, keys): + """Copy src dict items to dst and rename with new key""" + for skey, dkey in keys: + if skey in src: + dst[dkey] = src[skey] + + def get_system_info(session): """Returns some information of system and bios.""" @@ -2485,38 +2469,26 @@ def get_system_info(session): biosinfo = search_dmidecode(session, "BIOS Information") if biosinfo: entries = _get_type_and_value(biosinfo[0]) - if 'Vendor' in entries: - rec['BIOS_vendor'] = entries['Vendor'] - if 'Version' in entries: - rec['BIOS_version'] = entries['Version'] - if 'Release Date' in entries: - rec['BIOS_release_date'] = entries['Release Date'] - if 'BIOS Revision' in entries: - rec['BIOS_revision'] = entries['BIOS Revision'] + 
copy_dict_items(entries, rec, [('Vendor', 'BIOS_vendor'), + ('Version', 'BIOS_version'), + ('Release Date', 'BIOS_release_date'), + ('BIOS Revision', 'BIOS_revision')]) sysinfo = search_dmidecode(session, "System Information") if sysinfo: entries = _get_type_and_value(sysinfo[0]) - if 'Manufacturer' in entries: - rec['system_manufacturer'] = entries['Manufacturer'] - if 'Product Name' in entries: - rec['system_product_name'] = entries['Product Name'] - if 'Serial Number' in entries: - rec['system_serial_number'] = entries['Serial Number'] - if 'UUID' in entries: - rec['system_uuid'] = entries['UUID'] - if 'Version' in entries: - rec['system_version'] = entries['Version'] - if 'Family' in entries: - rec['system_family'] = entries['Family'] + copy_dict_items(entries, rec, [('Manufacturer', 'system_manufacturer'), + ('Product Name', 'system_product_name'), + ('Serial Number', 'system_serial_number'), + ('UUID', 'system_uuid'), + ('Version', 'system_version'), + ('Family', 'system_family')]) chassisinfo = search_dmidecode(session, "Chassis Information") if chassisinfo: entries = _get_type_and_value(chassisinfo[0]) - if 'Type' in entries: - rec['chassis_type'] = entries['Type'] - if 'Manufacturer' in entries: - rec['chassis_manufacturer'] = entries['Manufacturer'] + copy_dict_items(entries, rec, [('Type', 'chassis_type'), + ('Manufacturer', 'chassis_manufacturer')]) return rec @@ -2548,21 +2520,21 @@ def get_value(rec, key, default=""): def print_documentation(object_name): - print "--------- %s ---------" % bold(object_name) + print("--------- %s ---------" % bold(object_name)) classes = enumerate_test_classes() for test_class_name, test_class in classes: arr = (object_name).split('.') if test_class_name == object_name: # get the class info - print format(test_class.__doc__) - print "%s: %s" % (bold('Prereqs'), test_class.required_config) + print(format(test_class.__doc__)) + print("%s: %s" % (bold('Prereqs'), test_class.required_config)) sys.exit(0) elif len(arr) == 3 and ".".join(arr[:2]) == test_class_name: # get the method info - print format(getattr(test_class, arr[2]).__doc__) + print(format(getattr(test_class, arr[2]).__doc__)) sys.exit(0) - print "The test name specified (%s) was incorrect. Please specify the full test name." % object_name + print("The test name specified (%s) was incorrect. Please specify the full test name." % object_name) sys.exit(0) @@ -2595,9 +2567,9 @@ def read_valid_lines(filename): return res -def set_network_mtu(session, network_ref, MTU): +def set_network_mtu(session, network_ref, mtu): """Utility function for setting a network's MTU. MTU should be a string""" - session.xenapi.network.set_MTU(network_ref, str(MTU)) + session.xenapi.network.set_MTU(network_ref, str(mtu)) pifs = session.xenapi.network.get_PIFs(network_ref) for pif in pifs: unplug_pif(session, pif) @@ -2671,7 +2643,8 @@ def get_ack_version(session, host=None): try: return call_ack_plugin(session, 'get_ack_version', {}, host=host) except XenAPI.Failure, e: - log.debug("Failed to execute ack plugin call means ACK is not installed.") + log.debug( + "Failed to execute ack plugin call means ACK is not installed. 
Exception: %s" % str(e)) return None @@ -2700,7 +2673,7 @@ def check_vm_ping_response(session, vm_ref, mip, count=3, timeout=300): # Make the local shell call log.debug("Checking for ping response from VM %s at %s" % ( vm_ref, mip)) - process = subprocess.Popen(call, stdout=subprocess.PIPE) + process = subprocess.Popen(call, stdout=subprocess.PIPE) # NOSONAR stdout, stderr = process.communicate() response = str(stdout).strip() diff --git a/plugins/autocertkit b/plugins/autocertkit index 4bea868f..80bb7fe4 100755 --- a/plugins/autocertkit +++ b/plugins/autocertkit @@ -280,8 +280,8 @@ def ssh_command(ip, username, password, cmd_str, dbg_str=None, attempts=10, time log.debug("Attempt %d/%d: %s" % (i, attempts, cmd_str)) try: - sshcmd = ssh.SSHCommand( - ip, cmd_str, log, username, timeout, password) + sshcmd = ssh.SSHCommand(ip, cmd_str, username, password, + {'log': log, 'timeout': timeout}) result = sshcmd.read() except Exception, e: log.debug("Exception: %s" % str(e))
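
For reference, a minimal usage sketch (not part of the patch) of helpers touched above: the result-record convention documented in run_test(), the new copy_dict_items() helper, and the refactored eval_expr() comparison. The `from autocertkit import utils` import path and the "<operator> <value>" expression format for eval_expr() are assumptions inferred from the surrounding code; all sample values are illustrative only.

    # Illustrative sketch only; adjust the import to the local test environment.
    from autocertkit import utils

    # Result-record convention from run_test(): a test method returns a dict and
    # the runner copies 'status'/'result' plus the optional fields into the report.
    res = {'status': 'done', 'result': 'pass', 'info': 'Test ran successfully'}
    assert res['result'] in ('skip', 'pass', 'fail')

    # copy_dict_items(): copy selected keys from src to dst, renaming them on the way.
    entries = {'Vendor': 'ExampleCorp', 'Version': '1.0'}   # hypothetical dmidecode fields
    rec = {}
    utils.copy_dict_items(entries, rec, [('Vendor', 'BIOS_vendor'),
                                         ('Version', 'BIOS_version')])
    assert rec == {'BIOS_vendor': 'ExampleCorp', 'BIOS_version': '1.0'}

    # eval_expr(): assuming the "<operator> <value>" expression format implied by
    # the refactored comparison chain.
    assert utils.eval_expr("= bridge", "bridge")
    assert not utils.eval_expr("!= bridge", "bridge")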