Skip to content

Commit

Permalink
[DPE-4255] Fix tests (#247)
Browse files Browse the repository at this point in the history
* Upgrade dependencies

* fix tests

* fix tests

* fix tests

* add self-signed-certificates

* fix users initialisation

* fix users initialisation

* fix users initialisation

* fix users initialisation

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* update CI libs

* fix tox tasks

* fix tox tasks

* fix tox tasks

* Added missing group mark

* Added pytest asyncio

* added conftests + fixed paths

* Added logs

* revert

* updated data interfaces client applications

* removed trailing slash

* fixed app charm path

* removed unnecessary restart on TLS cert available +

* move user init code out of repl set init code

* fix user of db_intialised

* fix unit tests

* remove check for repl set initialised in init users

* fix use of self.db_initialised and update unit tests

* Update ci.yaml

* mock tenacity to shorten unit test time

* remove print

* bumped dp worfklows lib

* fix tox

* fix jira sync

* uncommented unit test in CI

* Update .github/workflows/ci.yaml

Co-authored-by: Carl Csaposs <[email protected]>

* Update .github/workflows/ci.yaml

Co-authored-by: Carl Csaposs <[email protected]>

* Update tox.ini

Co-authored-by: Carl Csaposs <[email protected]>

* PR feedback

* PR feedback

* Update libs

* Update .github/workflows/ci.yaml

Co-authored-by: Carl Csaposs <[email protected]>

* Update libs

* juju version as a variable

* pr feedback

* pr feedback

* env variables

* env variables removal

* env variables removal

* env variables removal

* fix rel tests

* match structure of VM HA tests

* fix metrics tests by using sessions

* update wait in TLS tests

* revert changes to mongo_op due to failures in rel tests

* update handling of strings that cannot be formated with json laods

* update time monitoring for tls tests + additional fixes for relation tests

* make newuser check more robust

* revert mongo op changes

* actually test new users privs

* skip backup tests

* make rel test checks more robust

---------

Co-authored-by: Mehdi-Bendriss <[email protected]>
Co-authored-by: Carl Csaposs <[email protected]>
  • Loading branch information
3 people authored May 6, 2024
1 parent 015a5de commit dd3ceee
Show file tree
Hide file tree
Showing 6 changed files with 219 additions and 189 deletions.
254 changes: 132 additions & 122 deletions poetry.lock

Large diffs are not rendered by default.

10 changes: 9 additions & 1 deletion tests/integration/backup_tests/test_backups.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
logger = logging.getLogger(__name__)


# TODO this should be refactored to remove duplication
@pytest_asyncio.fixture
async def continuous_writes_to_db(ops_test: OpsTest):
"""Continuously writes to DB for the duration of the test."""
Expand Down Expand Up @@ -72,6 +71,7 @@ async def add_writes_to_db(ops_test: OpsTest):
await clear_writes_action.wait()


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test: OpsTest) -> None:
Expand Down Expand Up @@ -108,6 +108,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle()


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_blocked_incorrect_creds(ops_test: OpsTest) -> None:
Expand Down Expand Up @@ -138,6 +139,7 @@ async def test_blocked_incorrect_creds(ops_test: OpsTest) -> None:
assert db_unit.workload_status_message == "s3 credentials are incorrect."


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_blocked_incorrect_conf(ops_test: OpsTest) -> None:
Expand All @@ -156,6 +158,7 @@ async def test_blocked_incorrect_conf(ops_test: OpsTest) -> None:
assert db_unit.workload_status_message == "s3 configurations are incompatible."


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_ready_correct_conf(ops_test: OpsTest) -> None:
Expand All @@ -180,6 +183,7 @@ async def test_ready_correct_conf(ops_test: OpsTest) -> None:
)


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_create_and_list_backups(ops_test: OpsTest) -> None:
Expand Down Expand Up @@ -208,6 +212,7 @@ async def test_create_and_list_backups(ops_test: OpsTest) -> None:
assert backups == 1, "Backup not created."


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_multi_backup(ops_test: OpsTest, continuous_writes_to_db) -> None:
Expand Down Expand Up @@ -295,6 +300,7 @@ async def test_multi_backup(ops_test: OpsTest, continuous_writes_to_db) -> None:
assert backups == 2, "Backup not created in bucket on AWS."


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_restore(ops_test: OpsTest, continuous_writes_to_db) -> None:
Expand Down Expand Up @@ -356,6 +362,7 @@ async def test_restore(ops_test: OpsTest, continuous_writes_to_db) -> None:


# TODO remove unstable mark once juju issue with secrets is resolved
@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.unstable
@pytest.mark.parametrize("cloud_provider", ["AWS", "GCP"])
Expand Down Expand Up @@ -466,6 +473,7 @@ async def test_restore_new_cluster(ops_test: OpsTest, continuous_writes_to_db, c
# await helpers.destroy_cluster(ops_test, cluster_name=NEW_CLUSTER)


@pytest.mark.skip("Skipping tests until fixing backup tests are addressed (DPE-4264).")
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_update_backup_password(ops_test: OpsTest) -> None:
Expand Down
96 changes: 48 additions & 48 deletions tests/integration/ha_tests/test_ha.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,54 @@ async def test_build_and_deploy(ops_test: OpsTest, cmd_mongodb_charm) -> None:
await relate_mongodb_and_application(ops_test, mongodb_application_name, application_name)


@pytest.mark.group(1)
async def test_storage_re_use(ops_test, continuous_writes):
"""Verifies that database units with attached storage correctly repurpose storage.

It is not enough to verify that Juju attaches the storage. Hence test checks that the mongod
properly uses the storage that was provided. (ie. doesn't just re-sync everything from
primary, but instead computes a diff between current storage and primary storage.)
"""
app = await get_application_name(ops_test, APP_NAME)

# removing the only replica can be disastrous
if len(ops_test.model.applications[app].units) < 2:
await ops_test.model.applications[app].add_unit(count=1)
await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=1000)

# remove a unit and attach it's storage to a new unit
current_number_units = len(ops_test.model.applications[app].units)
await scale_application(ops_test, app, current_number_units - 1)
await ops_test.model.wait_for_idle(
apps=[app], status="active", timeout=1000, wait_for_exact_units=(current_number_units - 1)
)

# k8s will automatically use the old storage from the storage pool
removal_time = datetime.now(timezone.utc).timestamp()
await scale_application(ops_test, app, current_number_units)
await ops_test.model.wait_for_idle(
apps=[app], status="active", timeout=1000, wait_for_exact_units=(current_number_units)
)

# for this test, we only scaled up the application by one unit. So it the highest unit will be
# the newest unit.
new_unit = get_highest_unit(ops_test, app)
assert await reused_storage(
ops_test, new_unit, removal_time
), "attached storage not properly re-used by MongoDB."

# verify presence of primary, replica set member configuration, and number of primaries
hostnames = await get_units_hostnames(ops_test)
member_hosts = await fetch_replica_set_members(ops_test)
assert set(member_hosts) == set(hostnames)
assert (
await count_primaries(ops_test) == 1
), "there is more than one primary in the replica set."

# verify all units are up to date.
await verify_writes(ops_test)


@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_scale_up_capablities(ops_test: OpsTest, continuous_writes) -> None:
Expand Down Expand Up @@ -620,51 +668,3 @@ async def test_network_cut(ops_test: OpsTest, continuous_writes, chaos_mesh):

# verify that old primary is up to date.
await verify_writes(ops_test)


@pytest.mark.group(1)
async def test_storage_re_use(ops_test, continuous_writes):
"""Verifies that database units with attached storage correctly repurpose storage.

It is not enough to verify that Juju attaches the storage. Hence test checks that the mongod
properly uses the storage that was provided. (ie. doesn't just re-sync everything from
primary, but instead computes a diff between current storage and primary storage.)
"""
app = await get_application_name(ops_test, APP_NAME)

# removing the only replica can be disastrous
if len(ops_test.model.applications[app].units) < 2:
await ops_test.model.applications[app].add_unit(count=1)
await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=1000)

# remove a unit and attach it's storage to a new unit
current_number_units = len(ops_test.model.applications[app].units)
await scale_application(ops_test, app, current_number_units - 1)
await ops_test.model.wait_for_idle(
apps=[app], status="active", timeout=1000, wait_for_exact_units=(current_number_units - 1)
)

# k8s will automatically use the old storage from the storage pool
removal_time = datetime.now(timezone.utc).timestamp()
await scale_application(ops_test, app, current_number_units)
await ops_test.model.wait_for_idle(
apps=[app], status="active", timeout=1000, wait_for_exact_units=(current_number_units)
)

# for this test, we only scaled up the application by one unit. So it the highest unit will be
# the newest unit.
new_unit = get_highest_unit(ops_test, app)
assert await reused_storage(
ops_test, new_unit, removal_time
), "attached storage not properly re-used by MongoDB."

# verify presence of primary, replica set member configuration, and number of primaries
hostnames = await get_units_hostnames(ops_test)
member_hosts = await fetch_replica_set_members(ops_test)
assert set(member_hosts) == set(hostnames)
assert (
await count_primaries(ops_test) == 1
), "there is more than one primary in the replica set."

# verify all units are up to date.
await verify_writes(ops_test)
16 changes: 7 additions & 9 deletions tests/integration/metrics_tests/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from pathlib import Path

import pytest
import urllib3
import requests
import yaml
from pytest_operator.plugin import OpsTest

Expand Down Expand Up @@ -41,17 +41,15 @@ async def verify_endpoints(ops_test: OpsTest, unit):
"""Verifies mongodb endpoint is functional on a given unit."""
app_name = await get_app_name(ops_test)
unit_id = unit.name.split("/")[1]
http = urllib3.PoolManager()
with requests.Session() as http:
unit_address = await get_address(ops_test=ops_test, app_name=app_name, unit_num=unit_id)
mongodb_exporter_url = f"http://{unit_address}:{MONGODB_EXPORTER_PORT}/metrics"
mongo_resp = http.get(mongodb_exporter_url)

unit_address = await get_address(ops_test=ops_test, app_name=app_name, unit_num=unit_id)
mongodb_exporter_url = f"http://{unit_address}:{MONGODB_EXPORTER_PORT}/metrics"
mongo_resp = http.request("GET", mongodb_exporter_url)

assert mongo_resp.status == 200
assert mongo_resp.status_code == 200

# if configured correctly there should be more than one mongodb metric present
mongodb_metrics = mongo_resp._body.decode("utf8")
assert mongodb_metrics.count("mongo") > 10
assert mongo_resp.text.count("mongo") > 10


@pytest.mark.group(1)
Expand Down
12 changes: 9 additions & 3 deletions tests/integration/relation_tests/test_charm_relations.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ async def test_deploy_charms(ops_test: OpsTest):
ops_test.model.deploy(
database_charm,
application_name=DATABASE_APP_NAME,
resources=db_resources,
num_units=REQUIRED_UNITS,
)
)
Expand Down Expand Up @@ -303,12 +304,17 @@ async def test_user_with_extra_roles(ops_test: OpsTest):
result = await run_mongo_op(
ops_test, cmd, f'"{connection_string}"', stringify=False, expect_json_load=False
)
assert 'user" : "newTestUser"' in result.data
cmd = "db.getUsers();"

result = await run_mongo_op(
ops_test, f'"{cmd}"', f'"{connection_string}"', stringify=False, expect_json_load=False
)
assert "application_first_database.newTestUser" in str(result)
cmd = 'db = db.getSiblingDB("new_database"); db.test_collection.insertOne({"test": "one"});'
result = await run_mongo_op(
ops_test, cmd, f'"{connection_string}"', stringify=False, ignore_errors=True
ops_test, cmd, f'"{connection_string}"', stringify=False, expect_json_load=False
)
assert '"acknowledged" : true' in result.data
assert "acknowledged: true" in str(result.data)


@pytest.mark.group(1)
Expand Down
20 changes: 14 additions & 6 deletions tests/integration/tls_tests/test_tls.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,15 +163,19 @@ async def test_rotate_tls_key(ops_test: OpsTest) -> None:
)
await check_certs_correctly_distributed(ops_test, unit)

# restart times for mongod_service are in the format H:M, meaning that if we want to check that
# the restart time is different, we have to ensure that a minute has passed.
time.sleep(61)

# set external and internal key using auto-generated key for each unit
for unit in ops_test.model.applications[app_name].units:
action = await unit.run_action(action_name="set-tls-private-key")
action = await action.wait()
assert action.status == "completed", "setting external and internal key failed."

# wait for certificate to be available and processed. Can get receive two certificate
# available events and restart twice so we do not wait for idle here
time.sleep(60)
# wait for certificate to be available and processed. Larger than normal idle period so that
# we guarantee that the charm receives + processes all events
await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)

# After updating both the external key and the internal key a new certificate request will be
# made; then the certificates should be available and updated.
Expand Down Expand Up @@ -225,6 +229,10 @@ async def test_set_tls_key(ops_test: OpsTest) -> None:
ops_test, unit.name, DB_SERVICE
)

# restart times for mongod_service are in the format H:M, meaning that if we want to check that
# the restart time is different, we have to ensure that a minute has passed.
time.sleep(61)

with open(f"{TLS_TEST_DATA}/internal-key.pem") as f:
internal_key_contents = f.readlines()
internal_key_contents = "".join(internal_key_contents)
Expand All @@ -249,9 +257,9 @@ async def test_set_tls_key(ops_test: OpsTest) -> None:
action = await action.wait()
assert action.status == "completed", "setting external and internal key failed."

# wait for certificate to be available and processed. Can get receive two certificate
# available events and restart twice so we do not wait for idle here
time.sleep(60)
# wait for certificate to be available and processed. Larger than normal idle period so that
# we guarantee that the charm receives + processes all events
await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)

# After updating both the external key and the internal key a new certificate request will be
# made; then the certificates should be available and updated.
Expand Down

0 comments on commit dd3ceee

Please sign in to comment.