
WIP
stormi committed Mar 5, 2024
1 parent 56845e5 commit fa65155
Showing 4 changed files with 81 additions and 23 deletions.
40 changes: 20 additions & 20 deletions conftest.py
@@ -6,7 +6,7 @@

import lib.config as global_config

from lib.common import safe_split, wait_for, vm_image, is_uuid
from lib.common import wait_for, vm_image, is_uuid
from lib.common import setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk
from lib.netutil import is_ipv6
from lib.pool import Pool
@@ -278,28 +278,28 @@ def vm_ref(request):

@pytest.fixture(scope="module")
def imported_vm(host, vm_ref):
# Do we cache VMs?
try:
from data import CACHE_IMPORTED_VM
except ImportError:
CACHE_IMPORTED_VM = False
assert CACHE_IMPORTED_VM in [True, False]

if is_uuid(vm_ref):
vm = VM(vm_ref, host)
name = vm.name()
vm_orig = VM(vm_ref, host)
name = vm_orig.name()
logging.info(">> Reuse VM %s (%s) on host %s" % (vm_ref, name, host))
else:
# where to import to: default SR, or first local SR
try:
from data import DEFAULT_SR
except ImportError:
DEFAULT_SR = 'default'
assert DEFAULT_SR in ['default', 'local']

if DEFAULT_SR == 'default':
vm = host.import_vm(vm_ref)
elif DEFAULT_SR == 'local':
local_sr_uuids = safe_split(
# xe sr-list doesn't support filtering by host UUID!
host.ssh(['xe sr-list host=$HOSTNAME content-type=user minimal=true']),
','
)
assert local_sr_uuids, "The first pool master (host A1) MUST have a local SR since DEFAULT_SR=='local'"
vm = host.import_vm(vm_ref, local_sr_uuids[0])
vm_orig = host.import_vm(vm_ref, host.main_sr(), use_cache=CACHE_IMPORTED_VM)

if CACHE_IMPORTED_VM:
# Clone the VM before running tests, so that the original VM remains untouched
logging.info(">> Clone cached VM before running tests")
vm = vm_orig.clone()
# Remove the description, which may contain a cache identifier
vm.param_set('name-description', None, "")
else:
vm = vm_orig

yield vm
# teardown
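For context, a minimal sketch of a test module consuming the `imported_vm` fixture changed above. The file name, test function and the VM.start() call are illustrative assumptions, not part of this commit:

# tests/misc/test_example.py (hypothetical)
def test_vm_starts(imported_vm):
    vm = imported_vm
    # When CACHE_IMPORTED_VM is True, `vm` is a clone of the cached VM,
    # so the test can modify it freely without invalidating the cache.
    vm.start()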
9 changes: 9 additions & 0 deletions data.py-dist
@@ -42,6 +42,15 @@ VM_IMAGES = {
# - 'local': use the first local SR found instead
DEFAULT_SR = 'default'

# Whether to cache VMs on the test host, i.e. import them only if not already present.
# This also causes the VM to be cloned at the beginning of each test module, so that
# the original VM remains untouched.
# /!\ The cache identifier is merely the URL the VM was imported from:
# a cached VM is simply a VM whose description is that URL. Delete the VM to remove it from the cache.
# A cached VM is only reused if it resides on the SR targeted by the import.
# This setting affects VMs managed by the `imported_vm` fixture.
CACHE_IMPORTED_VM = False

# Default NFS device config:
DEFAULT_NFS_DEVICE_CONFIG = {
# 'server': '10.0.0.2', # URL/Hostname of NFS server
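To illustrate the new setting, here is what a local data.py (derived from data.py-dist) could look like when enabling the cache; the chosen values are placeholders, not recommendations from this commit:

# data.py (local, not committed)
DEFAULT_SR = 'local'        # import VMs to the first local SR of the host
CACHE_IMPORTED_VM = True    # reuse already-imported VMs, clone them per test module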
45 changes: 43 additions & 2 deletions lib/host.py
@@ -200,7 +200,23 @@ def xo_server_reconnect(self):
# is not enough to guarantee that the host object exists yet.
wait_for(lambda: xo_object_exists(self.uuid), "Wait for XO to know about HOST %s" % self.uuid)

def import_vm(self, uri, sr_uuid=None):
def import_vm(self, uri, sr_uuid=None, use_cache=False):
if use_cache:
assert sr_uuid, "An SR UUID is necessary to use the import cache"
cache_key = f"[Cache for {uri}]"
# Look for an existing cache VM
vm_uuids = safe_split(self.xe('vm-list', {'name-description': cache_key}, minimal=True), ',')

# FIXME potentially too long?
for vm_uuid in vm_uuids:
vm = VM(vm_uuid, self)
# Make sure the VM is on the wanted SR.
# Assumption: if the first disk is on the SR, the VM is.
# If there's no VDI at all, then it is virtually on any SR.
if not vm.vdi_uuids() or vm.get_sr().uuid == sr_uuid:
logging.info(f"Reusing cached VM {vm.uuid} for {uri}")
return vm

params = {}
msg = "Import VM %s" % uri
if '://' in uri:
@@ -214,11 +230,14 @@ def import_vm(self, uri, sr_uuid=None):
vm_uuid = self.xe('vm-import', params)
logging.info("VM UUID: %s" % vm_uuid)
vm_name = prefix_object_name(self.xe('vm-param-get', {'uuid': vm_uuid, 'param-name': 'name-label'}))
self.xe('vm-param-set', {'uuid': vm_uuid, 'name-label': vm_name})
vm = VM(vm_uuid, self)
vm.param_set('name-label', None, vm_name)
# Set VM VIF networks to the host's management network
for vif in vm.vifs():
vif.move(self.management_network())
if use_cache:
logging.info(f"Marking VM {vm.uuid} as cached")
vm.param_set('name-description', None, cache_key)
return vm

def pool_has_vm(self, vm_uuid, vm_type='vm'):
@@ -430,6 +449,28 @@ def local_vm_srs(self):
srs.append(sr)
return srs

def main_sr(self):
""" Main SR is either the default SR, or the first local SR, depending on data.py's DEFAULT_SR. """
try:
from data import DEFAULT_SR
except ImportError:
DEFAULT_SR = 'default'
assert DEFAULT_SR in ['default', 'local']

sr_uuid = None
if DEFAULT_SR == 'local':
local_sr_uuids = safe_split(
# xe sr-list doesn't support filtering by host UUID!
self.ssh(['xe sr-list host=$HOSTNAME content-type=user minimal=true']),
','
)
assert local_sr_uuids, f"DEFAULT_SR=='local' so there must be a local SR on host {self}"
sr_uuid = local_sr_uuids[0]
else:
sr_uuid = self.pool.param_get('default-SR')
assert sr_uuid, f"DEFAULT_SR='default' so there must be a default SR on the pool of host {self}"
return sr_uuid

def hostname(self):
return self.ssh(['hostname'])

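A short sketch of how the new Host.main_sr() and the use_cache parameter of import_vm() combine, mirroring the conftest.py change above; the pool address and image URL are made-up values:

from lib.pool import Pool

pool = Pool('192.0.2.10')   # hypothetical pool master
host = pool.master
vm = host.import_vm('http://example.com/images/alpine.xva',   # hypothetical URL
                    sr_uuid=host.main_sr(), use_cache=True)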
10 changes: 9 additions & 1 deletion lib/pool.py
@@ -5,11 +5,13 @@

import lib.commands as commands

from lib.common import safe_split, wait_for, wait_for_not
from lib.common import safe_split, wait_for, wait_for_not, _param_get, _param_set
from lib.host import Host
from lib.sr import SR

class Pool:
xe_prefix = "pool"

def __init__(self, master_hostname_or_ip):
master = Host(self, master_hostname_or_ip)
assert master.is_master(), f"Host {master_hostname_or_ip} is not a master host. Aborting."
@@ -22,6 +24,12 @@ def __init__(self, master_hostname_or_ip):
self.uuid = self.master.xe('pool-list', minimal=True)
self.saved_uefi_certs = None

def param_get(self, param_name, key=None, accept_unknown_key=False):
return _param_get(self.master, Pool.xe_prefix, self.uuid, param_name, key, accept_unknown_key)

def param_set(self, param_name, value, key=None):
_param_set(self.master, Pool.xe_prefix, self.uuid, param_name, value, key)

def exec_on_hosts_on_error_rollback(self, func, rollback_func, host_list=[]):
"""
Execute a function on all hosts of the pool.
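Finally, a brief usage sketch for the new Pool.param_get() / Pool.param_set() helpers; the parameter names are standard xe pool fields and the values are illustrative only:

default_sr_uuid = pool.param_get('default-SR')     # as used by Host.main_sr() above
ha_enabled = pool.param_get('ha-enabled')
pool.param_set('name-description', 'pool used by automated tests')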
