Move the snapshot restore testcases to the TestBackupRestore class
TachunLin committed Sep 2, 2024
1 parent ff7063f · commit a77925b
Showing 1 changed file with 38 additions and 117 deletions.
harvester_e2e_tests/integrations/test_4_vm_backup_restore.py: 155 changes (38 additions, 117 deletions)
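Note on the dependency markers changed in this diff: the moved tests now declare depends=["TestBackupRestore::tests_backup_vm"], referring to the prerequisite backup test by its class-qualified pytest-dependency name within the same module. A minimal, self-contained sketch of that pattern follows; the test bodies are placeholders, and the param=True argument used by this suite comes from its own dependency plumbing, so it is omitted here:

import pytest


class TestBackupRestore:

    @pytest.mark.dependency()
    def tests_backup_vm(self):
        # Prerequisite: in the real suite this creates the VM backup
        # that the restore tests depend on.
        assert True

    @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"])
    def test_with_snapshot_restore_with_new_vm(self):
        # Skipped automatically by pytest-dependency if tests_backup_vm
        # did not pass (the default dependency scope is the module).
        assert True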
@@ -634,42 +634,7 @@ def test_restore_replace_with_vm_shutdown_command(
f"Executed stderr: {err}"
)


@pytest.mark.p0
@pytest.mark.backup_target
@pytest.mark.parametrize(
"backup_config", [
pytest.param("S3", marks=pytest.mark.S3),
pytest.param("NFS", marks=pytest.mark.NFS)
],
indirect=True)
class TestBackupRestoreWithSnapshot:

@pytest.mark.dependency()
def test_connection(self, api_client, backup_config, config_backup_target):
code, data = api_client.settings.backup_target_test_connection()
assert 200 == code, f'Failed to test backup target connection: {data}'

@pytest.mark.dependency(depends=["TestBackupRestoreWithSnapshot::test_connection"], param=True)
def tests_backup_vm(self, api_client, wait_timeout, backup_config, base_vm_with_data):
unique_vm_name = base_vm_with_data['name']
unique_backup_name = f"{unique_vm_name}-backup-with-snapshot"
# Create backup with the name as VM's name
code, data = api_client.vms.backup(unique_vm_name, unique_backup_name)
assert 204 == code, (code, data)
# Check backup is ready
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, backup = api_client.backups.get(unique_backup_name)
if 200 == code and backup.get('status', {}).get('readyToUse'):
break
sleep(3)
else:
raise AssertionError(
f'Timed-out waiting for the backup \'{unique_backup_name}\' to be ready.'
)

@pytest.mark.dependency(depends=["TestBackupRestoreWithSnapshot::tests_backup_vm"], param=True)
@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
def test_with_snapshot_restore_with_new_vm(
self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout,
backup_config, base_vm_with_data
@@ -678,7 +643,7 @@ def test_with_snapshot_restore_with_new_vm(
pub_key, pri_key = ssh_keypair

vm_snapshot_name = unique_vm_name + '-snapshot'
unique_backup_name = f"{unique_vm_name}-backup-with-snapshot"
# unique_backup_name = f"{unique_vm_name}-backup-with-snapshot"
# take vm snapshot
code, data = api_client.vm_snapshots.create(unique_vm_name, vm_snapshot_name)
assert 201 == code
@@ -696,6 +661,26 @@ assert 200 == code
assert 200 == code
assert data.get("status", {}).get("readyToUse") is True

vm_running, (code, data) = vm_checker.wait_status_running(unique_vm_name)
assert vm_running, (
f"Failed to restore VM({unique_vm_name}) with errors:\n"
f"Status({code}): {data}"
)

# Check VM Started then get IPs (vm and host)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
code, data = api_client.hosts.get(data['status']['nodeName'])
host_ip = next(addr['address'] for addr in data['status']['addresses']
if addr['type'] == 'InternalIP')
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'] = host_ip, vm_ip

# mess up the existing data
with vm_shell_from_host(
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
@@ -708,12 +693,9 @@
# Restore VM into new
restored_vm_name = f"{backup_config[0].lower()}-restore-{unique_vm_name}-with-snapshot"
spec = api_client.backups.RestoreSpec.for_new(restored_vm_name)
code, data = api_client.backups.restore(unique_backup_name, spec)
code, data = api_client.backups.restore(unique_vm_name, spec)
assert 201 == code, (code, data)

vm_getable, (code, data) = vm_checker.wait_getable(restored_vm_name)
assert vm_getable, (code, data)

# Check VM Started then get IPs (vm and host)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(restored_vm_name, ['default'])
assert vm_got_ips, (
@@ -768,7 +750,7 @@ def test_with_snapshot_restore_with_new_vm(
vol_name = vol['volume']['persistentVolumeClaim']['claimName']
api_client.volumes.delete(vol_name)

@pytest.mark.dependency(depends=["TestBackupRestoreWithSnapshot::tests_backup_vm"], param=True)
@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
def test_with_snapshot_restore_replace_retain_vols(
self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
backup_config, base_vm_with_data
@@ -777,7 +759,7 @@ def test_with_snapshot_restore_replace_retain_vols(
pub_key, pri_key = ssh_keypair

vm_snapshot_name = unique_vm_name + '-snapshot-retain'
unique_backup_name = f"{unique_vm_name}-backup-with-snapshot"
# unique_backup_name = f"{unique_vm_name}-backup-with-snapshot"
# take vm snapshot
code, data = api_client.vm_snapshots.create(unique_vm_name, vm_snapshot_name)
assert 201 == code
@@ -795,28 +777,12 @@ def test_with_snapshot_restore_replace_retain_vols(
assert 200 == code
assert data.get("status", {}).get("readyToUse") is True

# mess up the existing data
with vm_shell_from_host(
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
base_vm_with_data['ssh_user'], pkey=pri_key
) as sh:
out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
assert not err, (out, err)
sh.exec_command('sync')

# Stop the VM then restore existing
vm_stopped, (code, data) = vm_checker.wait_stopped(unique_vm_name)
assert vm_stopped, (
f"Failed to Stop VM({unique_vm_name}) with errors:\n"
vm_running, (code, data) = vm_checker.wait_status_running(unique_vm_name)
assert vm_running, (
f"Failed to restore VM({unique_vm_name}) with errors:\n"
f"Status({code}): {data}"
)

spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=False)
code, data = api_client.backups.restore(unique_backup_name, spec)
assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
assert vm_getable, (code, data)

# Check VM Started then get IPs (vm and host)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
@@ -829,69 +795,27 @@ def test_with_snapshot_restore_replace_retain_vols(
code, data = api_client.hosts.get(data['status']['nodeName'])
host_ip = next(addr['address'] for addr in data['status']['addresses']
if addr['type'] == 'InternalIP')
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'] = host_ip, vm_ip

# Login to the new VM and check data is existing
with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
assert cloud_inited, (
f"VM {unique_vm_name} Started {wait_timeout} seconds"
f", but cloud-init still in {out}"
)
out, err = sh.exec_command(f"cat {backup_data['path']}")

assert backup_data['content'] in out, (
f"cloud-init writefile failed\n"
f"Executed stdout: {out}\n"
f"Executed stderr: {err}"
)

@pytest.mark.skip_version_if('< v1.2.2')
@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
def test_restore_replace_with_vm_shutdown_command(
self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
backup_config, base_vm_with_data
):
''' ref: https://github.com/harvester/tests/issues/943
1. Create VM and write some data
2. Take backup for the VM
3. Mess up existing data
3. Shutdown the VM by executing `shutdown` command in OS
4. Restore backup to replace existing VM
5. VM should be restored successfully
6. Data in VM should be the same as backed up
'''

unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
pub_key, pri_key = ssh_keypair

# mess up the existing data then shutdown it
# mess up the existing data
with vm_shell_from_host(
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
base_vm_with_data['ssh_user'], pkey=pri_key
) as sh:
out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
assert not err, (out, err)
sh.exec_command('sync')
sh.exec_command('sudo shutdown now')

endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get(unique_vm_name)
if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
break
sleep(5)
else:
raise AssertionError(
f"Failed to shut down VM({unique_vm_name}) with errors:\n"
f"Status({code}): {data}"
)
# Stop the VM then restore existing
vm_stopped, (code, data) = vm_checker.wait_stopped(unique_vm_name)
assert vm_stopped, (
f"Failed to Stop VM({unique_vm_name}) with errors:\n"
f"Status({code}): {data}"
)

# restore VM to existing
spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=False)
code, data = api_client.backups.restore(unique_vm_name, spec)
assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
assert vm_getable, (code, data)

# Check VM Started then get IPs (vm and host)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
@@ -905,6 +829,7 @@ def test_restore_replace_with_vm_shutdown_command(
code, data = api_client.hosts.get(data['status']['nodeName'])
host_ip = next(addr['address'] for addr in data['status']['addresses']
if addr['type'] == 'InternalIP')
base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'] = host_ip, vm_ip

# Login to the new VM and check data is existing
with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
@@ -921,10 +846,6 @@ def test_restore_replace_with_vm_shutdown_command(
f"Executed stderr: {err}"
)

# TODO: try to find the suitable solution to delete the retain volume
# without making the deleting VM stuck in Terminating


@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.p0
@pytest.mark.backup_target
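The readiness checks in this file (waiting for a backup to become ready, or for the VM to reach Stopped) follow a simple deadline-polling idiom. A generic restatement of that idiom as a standalone helper, not something that exists in the repository; wait_until and the names in the usage comment are illustrative:

from datetime import datetime, timedelta
from time import sleep


def wait_until(predicate, timeout, interval=3):
    # Poll `predicate` until it returns truthy or `timeout` seconds elapse,
    # mirroring the while/else polling loops used by these tests.
    endtime = datetime.now() + timedelta(seconds=timeout)
    while endtime > datetime.now():
        if predicate():
            return True
        sleep(interval)
    return False


# Usage, raising the same way the tests do when the deadline passes:
# if not wait_until(lambda: backup_is_ready(), wait_timeout):
#     raise AssertionError("Timed-out waiting for the backup to be ready.")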
