diff --git a/anta/catalog.py b/anta/catalog.py
index b5a77ad25..1803a7176 100644
--- a/anta/catalog.py
+++ b/anta/catalog.py
@@ -300,7 +300,6 @@ def __init__(
         self.tag_to_tests: defaultdict[str | None, set[AntaTestDefinition]] = defaultdict(set)
         self.tests_without_tags: set[AntaTestDefinition] = set()
         self.indexes_built: bool = False
-        self.final_tests_count: int = 0
 
     @property
     def filename(self) -> Path | None:
diff --git a/anta/runner.py b/anta/runner.py
index 6e3290267..ea30a7a88 100644
--- a/anta/runner.py
+++ b/anta/runner.py
@@ -147,6 +147,7 @@ def prepare_tests(
     device_to_tests: defaultdict[AntaDevice, set[AntaTestDefinition]] = defaultdict(set)
 
     # Create AntaTestRunner tuples from the tags
+    final_tests_count = 0
     for device in inventory.devices:
         if tags:
             if not any(tag in device.tags for tag in tags):
@@ -159,9 +160,9 @@
             # Add the tests with matching tags from device tags
             device_to_tests[device].update(catalog.get_tests_by_tags(device.tags))
 
-        catalog.final_tests_count += len(device_to_tests[device])
+        final_tests_count += len(device_to_tests[device])
 
-    if catalog.final_tests_count == 0:
+    if final_tests_count == 0:
         msg = (
             f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs."
         )
@@ -171,13 +172,15 @@
     return device_to_tests
 
 
-def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]]) -> list[Coroutine[Any, Any, TestResult]]:
+def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager) -> list[Coroutine[Any, Any, TestResult]]:
     """Get the coroutines for the ANTA run.
 
     Parameters
     ----------
     selected_tests
         A mapping of devices to the tests to run. The selected tests are generated by the `prepare_tests` function.
+    manager
+        A ResultManager
 
     Returns
     -------
@@ -189,6 +192,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio
         for test in test_definitions:
             try:
                 test_instance = test.test(device=device, inputs=test.inputs)
+                manager.add(test_instance.result)
                 coros.append(test_instance.test())
             except Exception as e:  # noqa: PERF203, BLE001
                 # An AntaTest instance is potentially user-defined code.
@@ -256,38 +260,35 @@ async def main(  # noqa: PLR0913
     selected_tests = prepare_tests(selected_inventory, catalog, tests, tags)
     if selected_tests is None:
         return
+    final_tests_count = sum(len(tests) for tests in selected_tests.values())
 
     run_info = (
         "--- ANTA NRFU Run Information ---\n"
         f"Number of devices: {len(inventory)} ({len(selected_inventory)} established)\n"
-        f"Total number of selected tests: {catalog.final_tests_count}\n"
+        f"Total number of selected tests: {final_tests_count}\n"
         f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n"
         "---------------------------------"
     )
 
     logger.info(run_info)
 
-    if catalog.final_tests_count > limits[0]:
+    if final_tests_count > limits[0]:
         logger.warning(
             "The number of concurrent tests is higher than the open file descriptors limit for this ANTA process.\n"
             "Errors may occur while running the tests.\n"
             "Please consult the ANTA FAQ."
         )
 
-    coroutines = get_coroutines(selected_tests)
+    coroutines = get_coroutines(selected_tests, manager)
 
     if dry_run:
         logger.info("Dry-run mode, exiting before running the tests.")
-        for coro in coroutines:
-            coro.close()
         return
 
     if AntaTest.progress is not None:
         AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coroutines))
 
     with Catchtime(logger=logger, message="Running ANTA tests"):
-        test_results = await asyncio.gather(*coroutines)
-        for r in test_results:
-            manager.add(r)
+        await asyncio.gather(*coroutines)
 
     log_cache_statistics(selected_inventory.devices)
diff --git a/tests/benchmark/test_anta.py b/tests/benchmark/test_anta.py
index 82d08cf6e..a917edfd9 100644
--- a/tests/benchmark/test_anta.py
+++ b/tests/benchmark/test_anta.py
@@ -44,13 +44,9 @@ def bench() -> ResultManager:
     manager = benchmark(bench)
 
     logging.disable(logging.NOTSET)
 
-    if len(manager.results) != 0:
-        pytest.fail("ANTA Dry-Run mode should not return any result", pytrace=False)
-    if catalog.final_tests_count != len(inventory) * len(catalog.tests):
-        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} selected tests but got {catalog.final_tests_count}", pytrace=False)
-    bench_info = (
-        "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Selected tests: {catalog.final_tests_count}\n" "-----------------------------------------------"
-    )
+    if len(manager.results) != len(inventory) * len(catalog.tests):
+        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} selected tests but got {len(manager.results)}", pytrace=False)
+    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Selected tests: {len(manager.results)}\n" "-----------------------------------------------"
     logger.info(bench_info)
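For reviewers, a minimal sketch of the call pattern this change enables, assuming an already-built AntaInventory and AntaCatalog (construction elided; count_selected_tests is a hypothetical helper, not part of the patch). Because get_coroutines now registers every TestResult on the ResultManager at instantiation time, a dry run can derive the selected-test count from manager.results instead of the removed catalog.final_tests_count:

# Hypothetical usage sketch -- not part of the patch above.
from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.runner import get_coroutines, prepare_tests


def count_selected_tests(inventory: AntaInventory, catalog: AntaCatalog) -> int:
    """Return the number of selected tests without awaiting any test coroutine."""
    manager = ResultManager()
    selected_tests = prepare_tests(inventory, catalog, tests=None, tags=None)
    if selected_tests is None:
        return 0
    # Creating the coroutines registers a placeholder result per test on the manager.
    coroutines = get_coroutines(selected_tests, manager)
    # Close the never-awaited coroutines to avoid RuntimeWarning noise.
    for coro in coroutines:
        coro.close()
    return len(manager.results)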