This repository has been archived by the owner on Apr 7, 2022. It is now read-only.

Merge pull request #10218 from tpapaioa/fix_test_vm_retirement_from_global_region

[RFR] Fix navigation for temp appliance tests
mshriver authored Jul 2, 2020
2 parents 2e7a4cc + 825080d commit 4bd536d
Showing 3 changed files with 66 additions and 46 deletions.
9 changes: 9 additions & 0 deletions cfme/fixtures/cli.py
@@ -279,6 +279,9 @@ def distributed_appliances(temp_appliance_preconfig_funcscope_rhevm,
     secondary_appliance.configure(region=0, key_address=primary_appliance.hostname,
                                   db_address=primary_appliance.hostname)

+    primary_appliance.browser_steal = True
+    secondary_appliance.browser_steal = True
+
     return primary_appliance, secondary_appliance


@@ -299,6 +302,9 @@ def replicated_appliances(temp_appliance_preconfig_funcscope_rhevm,
     global_appliance.add_pglogical_replication_subscription(remote_appliance.hostname)
     logger.info("Finished appliance replication configuration.")

+    remote_appliance.browser_steal = True
+    global_appliance.browser_steal = True
+
     return remote_appliance, global_appliance


@@ -318,6 +324,9 @@ def replicated_appliances_preupdate(multiple_preupdate_appliances):
     global_appliance.add_pglogical_replication_subscription(remote_appliance.hostname)
     logger.info("Finished appliance replication configuration.")

+    global_appliance.browser_steal = True
+    remote_appliance.browser_steal = True
+
     return remote_appliance, global_appliance
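A note on the mechanism (not part of the commit): as the tests in this PR suggest, `browser_steal` is consulted when a test enters an appliance context, so setting it in the fixtures means every consuming test gets a browser session pointed at the right appliance without per-test bookkeeping. A minimal sketch of a consuming test, with an illustrative test name and body:

def test_example(replicated_appliances):
    remote_appliance, global_appliance = replicated_appliances

    # browser_steal is already True (set by the fixture above), so entering
    # the context takes over the browser and directs UI navigation here.
    with remote_appliance:
        pass  # UI actions against the remote appliance

    with global_appliance:
        pass  # UI actions against the global appliance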
30 changes: 21 additions & 9 deletions cfme/tests/cloud_infra_common/test_retirement.py
@@ -14,6 +14,7 @@
 from cfme.services.requests import RequestsView
 from cfme.utils.appliance.implementations.ui import navigate_to
 from cfme.utils.appliance.implementations.ui import navigator
+from cfme.utils.blockers import BZ
 from cfme.utils.log import logger
 from cfme.utils.providers import ProviderFilter
 from cfme.utils.wait import wait_for
@@ -434,11 +435,8 @@ def test_resume_retired_instance(create_vm, provider, remove_date):
 @pytest.mark.long_running
 @test_requirements.multi_region
 @test_requirements.retirement
-def test_vm_retirement_from_global_region(setup_multi_region_cluster,
-                                          multi_region_cluster,
-                                          activate_global_appliance,
-                                          setup_remote_provider,
-                                          create_vm):
+@pytest.mark.meta(blockers=[BZ(1839770)])
+def test_vm_retirement_from_global_region(replicated_appliances, create_vm):
     """
     Retire a VM via Centralized Administration
@@ -456,15 +454,29 @@
         2. VM transitions to Retired state in the Global and Remote region.
     """
+    remote_appliance, global_appliance = replicated_appliances
+
     expected_date = {}
     expected_date['start'] = datetime.utcnow() + timedelta(minutes=-5)

-    create_vm.retire()
+    provider = create_vm.provider

-    verify_retirement_state(create_vm)
+    # Instantiate on each appliance so that browser uses the correct appliance.
+    vm_per_appliance = {
+        a: a.provider_based_collection(provider).instantiate(create_vm.name, provider)
+        for a in replicated_appliances
+    }

-    expected_date['end'] = datetime.utcnow() + timedelta(minutes=5)
-    verify_retirement_date(create_vm, expected_date=expected_date)
+    with remote_appliance:
+        provider.create()
+
+    with global_appliance:
+        vm_per_appliance[global_appliance].retire()
+
+    with remote_appliance:
+        verify_retirement_state(vm_per_appliance[remote_appliance])
+        expected_date['end'] = datetime.utcnow() + timedelta(minutes=5)
+        verify_retirement_date(vm_per_appliance[remote_appliance], expected_date=expected_date)
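The per-appliance instantiation above is the heart of the fix: a VM entity is bound to the appliance it was instantiated from, so the same backing VM gets one entity object per appliance and each UI call runs in that appliance's browser. A condensed sketch of the pattern, reusing names from the diff (`create_vm`, `provider`, and `verify_retirement_state` come from the test's fixtures and helpers):

# One entity object per appliance for the same underlying VM.
vm_per_appliance = {
    appliance: appliance.provider_based_collection(provider).instantiate(create_vm.name, provider)
    for appliance in (remote_appliance, global_appliance)
}

with global_appliance:
    vm_per_appliance[global_appliance].retire()  # retire via the global region's UI

with remote_appliance:
    verify_retirement_state(vm_per_appliance[remote_appliance])  # verify via the remote region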
73 changes: 36 additions & 37 deletions cfme/tests/distributed/test_appliance_replication.py
@@ -40,12 +40,10 @@ def test_appliance_replicate_between_regions(provider, replicated_appliances):
     """
     remote_appliance, global_appliance = replicated_appliances

-    remote_appliance.browser_steal = True
     with remote_appliance:
         provider.create()
         remote_appliance.collections.infra_providers.wait_for_a_provider()

-    global_appliance.browser_steal = True
     with global_appliance:
         global_appliance.collections.infra_providers.wait_for_a_provider()
         assert provider.exists
@@ -69,12 +67,10 @@ def test_external_database_appliance(provider, distributed_appliances):
     """
     primary_appliance, secondary_appliance = distributed_appliances

-    primary_appliance.browser_steal = True
     with primary_appliance:
         provider.create()
         primary_appliance.collections.infra_providers.wait_for_a_provider()

-    secondary_appliance.browser_steal = True
     with secondary_appliance:
         secondary_appliance.collections.infra_providers.wait_for_a_provider()
         assert provider.exists
@@ -101,12 +97,10 @@ def test_appliance_replicate_database_disconnection(provider, replicated_appliances):
     sleep(60)
     global_appliance.db_service.start()

-    remote_appliance.browser_steal = True
     with remote_appliance:
         provider.create()
         remote_appliance.collections.infra_providers.wait_for_a_provider()

-    global_appliance.browser_steal = True
     with global_appliance:
         global_appliance.collections.infra_providers.wait_for_a_provider()
         assert provider.exists
@@ -129,15 +123,13 @@ def test_appliance_replicate_database_disconnection_with_backlog(provider, replicated_appliances):
     """
     remote_appliance, global_appliance = replicated_appliances

-    remote_appliance.browser_steal = True
     with remote_appliance:
         provider.create()
         global_appliance.db_service.stop()
         sleep(60)
         global_appliance.db_service.start()
         remote_appliance.collections.infra_providers.wait_for_a_provider()

-    global_appliance.browser_steal = True
     with global_appliance:
         global_appliance.collections.infra_providers.wait_for_a_provider()
         assert provider.exists
@@ -163,16 +155,20 @@ def test_replication_vm_power_control(provider, create_vm, context, replicated_appliances):
     """
     remote_appliance, global_appliance = replicated_appliances

-    remote_appliance.browser_steal = True
+    vm_per_appliance = {
+        a: a.provider_based_collection(provider).instantiate(create_vm.name, provider)
+        for a in replicated_appliances
+    }
+
     with remote_appliance:
         assert provider.create(validate_inventory=True), "Could not create provider."

-    global_appliance.browser_steal = True
     with global_appliance:
-        create_vm.power_control_from_cfme(option=create_vm.POWER_OFF, cancel=False)
-        navigate_to(create_vm.provider, 'Details')
-        create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_OFF, timeout=900)
-        assert create_vm.find_quadicon().data['state'] == 'off', "Incorrect VM quadicon state"
+        vm = vm_per_appliance[global_appliance]
+        vm.power_control_from_cfme(option=vm.POWER_OFF, cancel=False)
+        # navigate_to(provider, 'Details')
+        vm.wait_for_vm_state_change(desired_state=vm.STATE_OFF, timeout=900)
+        assert vm.find_quadicon().data['state'] == 'off', "Incorrect VM quadicon state"
         assert not create_vm.mgmt.is_running, "VM is still running"
@@ -198,16 +194,17 @@ def test_replication_connect_to_vm_in_region(provider, replicated_appliances):

     vm_name = provider.data['cap_and_util']['chargeback_vm']

-    remote_appliance.browser_steal = True
+    vm_per_appliance = {
+        a: a.provider_based_collection(provider).instantiate(vm_name, provider)
+        for a in replicated_appliances
+    }
+
     with remote_appliance:
         provider.create()
         remote_appliance.collections.infra_providers.wait_for_a_provider()

-    global_appliance.browser_steal = True
     with global_appliance:
-        collection = global_appliance.provider_based_collection(provider)
-        vm = collection.instantiate(vm_name, provider)
-        view = navigate_to(vm, 'Details')
+        view = navigate_to(vm_per_appliance[global_appliance], 'Details')

         initial_count = len(view.browser.window_handles)
         main_window = view.browser.current_window_handle
@@ -233,7 +230,9 @@ def test_replication_connect_to_vm_in_region(provider, replicated_appliances):
             'password': conf.credentials['default']['password']
         })
         view.login.click()
-        view = vm.create_view(InfraVmDetailsView)
+
+        # Use VM instantiated on global_appliance here because we're still using the same browser.
+        view = vm_per_appliance[global_appliance].create_view(InfraVmDetailsView)
         wait_for(lambda: view.is_displayed, message="Wait for VM Details page")
@@ -265,7 +264,6 @@ def test_appliance_httpd_roles(distributed_appliances):
     sid = secondary_appliance.server.sid
     secondary_server = primary_appliance.collections.servers.instantiate(sid=sid)

-    primary_appliance.browser_steal = True
     with primary_appliance:
         view = navigate_to(secondary_server, 'Server')
@@ -348,7 +346,6 @@ def test_server_role_failover(distributed_appliances):
     # Enable all roles on both appliances.
     for appliance in distributed_appliances:
-        appliance.browser_steal = True
         with appliance:
             view = navigate_to(appliance.server, 'Server')
             view.server_roles.fill(fill_values)
@@ -393,9 +390,10 @@ def test_appliance_replicate_zones(replicated_appliances):
     global_zone = 'global-A'
     global_appliance.collections.zones.create(name=global_zone, description=global_zone)

-    view = navigate_to(global_appliance.server, 'Server')
-    global_zones = [o.text for o in view.basic_information.appliance_zone.all_options]
-    assert global_zone in global_zones and remote_zone not in global_zones
+    with global_appliance:
+        view = navigate_to(global_appliance.server, 'Server')
+        global_zones = [o.text for o in view.basic_information.appliance_zone.all_options]
+        assert global_zone in global_zones and remote_zone not in global_zones


 @pytest.mark.tier(2)
@@ -419,15 +417,16 @@ def test_appliance_replicate_remote_down(replicated_appliances):
     """
     remote_appliance, global_appliance = replicated_appliances

-    global_region = global_appliance.server.zone.region
-    assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
-        "Remote appliance not found on Replication tab after initial configuration.")
-
-    result = global_appliance.ssh_client.run_command(
-        f"firewall-cmd --direct --add-rule ipv4 filter OUTPUT 0 -d {remote_appliance.hostname}"
-        " -j DROP")
-    assert result.success, "Could not create firewall rule on global appliance."
-
-    global_appliance.browser.widgetastic.refresh()
-    assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
-        "Remote appliance not found on Replication tab after dropped connection.")
+    with global_appliance:
+        global_region = global_appliance.server.zone.region
+        assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
+            "Remote appliance not found on Replication tab after initial configuration.")
+
+        result = global_appliance.ssh_client.run_command(
+            f"firewall-cmd --direct --add-rule ipv4 filter OUTPUT 0 -d {remote_appliance.hostname}"
+            " -j DROP")
+        assert result.success, "Could not create firewall rule on global appliance."
+
+        global_appliance.browser.widgetastic.refresh()
+        assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
+            "Remote appliance not found on Replication tab after dropped connection.")
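The zone and remote-down hunks at the end follow the same reasoning as the rest of the commit: UI navigation in a multi-appliance test is only deterministic inside an appliance context. A schematic before/after of the test_appliance_replicate_zones change (condensed from the diff above; the import is the one this module already uses):

from cfme.utils.appliance.implementations.ui import navigate_to

# Before: navigation ran against whichever appliance last owned the browser.
# view = navigate_to(global_appliance.server, 'Server')

# After: the context pins navigation to the global appliance.
with global_appliance:
    view = navigate_to(global_appliance.server, 'Server')
    zones = [o.text for o in view.basic_information.appliance_zone.all_options]
    assert 'global-A' in zones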