diff --git a/pytest-embedded-idf/pytest_embedded_idf/unity_tester.py b/pytest-embedded-idf/pytest_embedded_idf/unity_tester.py
index a57cc10b..dccb7901 100644
--- a/pytest-embedded-idf/pytest_embedded_idf/unity_tester.py
+++ b/pytest-embedded-idf/pytest_embedded_idf/unity_tester.py
@@ -418,27 +418,74 @@ def run_single_board_case(self, name: str, reset: bool = False, timeout: float =
 
     def run_all_single_board_cases(
         self,
-        group: t.Optional[str] = None,
+        group: t.Optional[t.Union[str, t.List[str]]] = None,
         reset: bool = False,
         timeout: float = 30,
         run_ignore_cases: bool = False,
+        name: t.Optional[t.Union[str, t.List[str]]] = None,
+        attributes: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> None:
         """
         Run all single board cases, including multi_stage cases, and normal cases
 
+        Note:
+            When group, name, and attributes are combined,
+            a test case is selected if it matches any of them.
+
         Args:
-            group: test case group
-            reset: whether to perform a hardware reset before running a case
-            timeout: timeout. (Default: 30 seconds)
-            run_ignore_cases: run ignored test cases or not
+            group: Test case group or a list of groups to run. Terms joined with
+                '&' must all match; a term prefixed with '!' inverts its match.
+            reset: Whether to perform a hardware reset before running a test case.
+            timeout: Timeout in seconds. (Default: 30 seconds)
+            run_ignore_cases: Whether to run ignored test cases as well.
+            name: Test case name or a list of test case names to run.
+            attributes: Dictionary of attributes; a test case matches if all key/value pairs equal its own.
         """
+        if group is None:
+            group = []
+        if isinstance(group, str):
+            group: t.List[str] = [group]
+        group: t.List[t.List[str]] = [[_and.strip() for _and in _or.split('&')] for _or in group]
+
+        if isinstance(name, str):
+            name: t.List[str] = [name]
+
+        def validate_group(case_groups):
+            if not group:
+                return True
+
+            for _or in group:
+                for _and in _or:
+                    invert = _and.startswith('!')
+                    _and = _and.lstrip('!')
+                    result = _and in case_groups
+                    if invert:
+                        result = not result
+                    if not result:
+                        break
+                else:
+                    return True
+
+            return False
+
         for case in self.test_menu:
-            if not group or group in case.groups:
-                if not case.is_ignored or run_ignore_cases:
-                    if case.type == 'normal':
-                        self._run_normal_case(case, reset=reset, timeout=timeout)
-                    elif case.type == 'multi_stage':
-                        self._run_multi_stage_case(case, reset=reset, timeout=timeout)
+            selected = False
+            if not group and not name and not attributes:
+                selected = True
+            if group and validate_group(case.groups):
+                selected = True
+            if name and case.name in name:
+                selected = True
+            if attributes and all(case.attributes.get(k) == v for k, v in attributes.items()):
+                selected = True
+
+            if not selected:
+                continue
+            if not case.is_ignored or run_ignore_cases:
+                if case.type == 'normal':
+                    self._run_normal_case(case, reset=reset, timeout=timeout)
+                elif case.type == 'multi_stage':
+                    self._run_multi_stage_case(case, reset=reset, timeout=timeout)
 
 
 class _MultiDevTestDut:
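The group expressions accepted above form an OR of AND-terms: each list entry is
an alternative, '&' joins terms that must all hold, and '!' negates a single
term. A standalone sketch of that rule, written here for illustration on the
assumption that it mirrors validate_group (it is not code from the package):

    def matches(group_exprs, case_groups):
        # Split each expression into its '&'-joined terms.
        conjunctions = [[term.strip() for term in expr.split('&')] for expr in group_exprs]
        if not conjunctions:
            return True  # an empty filter selects every case
        for conjunction in conjunctions:
            ok = True
            for term in conjunction:
                invert = term.startswith('!')
                present = term.lstrip('!') in case_groups
                if present == invert:  # term fails: missing, or present but negated
                    ok = False
                    break
            if ok:
                return True
        return False

    assert matches(['a&b'], {'a', 'b'})        # '&' requires both groups
    assert matches(['a&b', 'c'], {'c'})        # list entries are OR-ed
    assert matches(['!a'], {'b'})              # '!' inverts membership
    assert not matches(['a&!b'], {'a', 'b'})   # a negated term vetoes its AND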
diff --git a/pytest-embedded-idf/tests/test_idf.py b/pytest-embedded-idf/tests/test_idf.py
index cac6ef63..90d003f6 100644
--- a/pytest-embedded-idf/tests/test_idf.py
+++ b/pytest-embedded-idf/tests/test_idf.py
@@ -716,6 +716,69 @@ def test_dut_run_all_single_board_cases(dut):
     assert multi_stage.attrib['name'] == 'multiple_stages_test'
 
 
+def test_dut_run_all_single_board_cases_group(testdir):
+    testdir.makepyfile(r"""
+        def test_dut_run_all_single_board_cases(dut):
+            dut.run_all_single_board_cases(group="normal_case", timeout=10)
+    """)
+    testdir.runpytest(
+        '-s',
+        '--embedded-services', 'esp,idf',
+        '--app-path', os.path.join(testdir.tmpdir, 'unit_test_app_esp32'),
+        '--log-cli-level', 'DEBUG',
+        '--junitxml', 'report.xml',
+    )
+
+    junit_report = ET.parse('report.xml').getroot()[0]
+
+    assert junit_report.attrib['errors'] == '0'
+    assert junit_report.attrib['failures'] == '1'
+    assert junit_report.attrib['skipped'] == '0'
+    assert junit_report.attrib['tests'] == '1'
+
+
+def test_dut_run_all_single_board_cases_invert_group(testdir):
+    testdir.makepyfile(r"""
+        def test_dut_run_all_single_board_cases(dut):
+            dut.run_all_single_board_cases(group="!normal_case", timeout=10)
+    """)
+    testdir.runpytest(
+        '-s',
+        '--embedded-services', 'esp,idf',
+        '--app-path', os.path.join(testdir.tmpdir, 'unit_test_app_esp32'),
+        '--log-cli-level', 'DEBUG',
+        '--junitxml', 'report.xml',
+    )
+
+    junit_report = ET.parse('report.xml').getroot()[0]
+
+    assert junit_report.attrib['errors'] == '0'
+    assert junit_report.attrib['failures'] == '0'
+    assert junit_report.attrib['skipped'] == '0'
+    assert junit_report.attrib['tests'] == '1'
+
+
+def test_dut_run_all_single_board_cases_by_names(testdir):
+    testdir.makepyfile(r"""
+        def test_dut_run_all_single_board_cases(dut):
+            dut.run_all_single_board_cases(name=["normal_case1", "multiple_stages_test"])
+    """)
+    testdir.runpytest(
+        '-s',
+        '--embedded-services', 'esp,idf',
+        '--app-path', os.path.join(testdir.tmpdir, 'unit_test_app_esp32'),
+        '--log-cli-level', 'DEBUG',
+        '--junitxml', 'report.xml',
+    )
+
+    junit_report = ET.parse('report.xml').getroot()[0]
+
+    assert junit_report.attrib['errors'] == '0'
+    assert junit_report.attrib['failures'] == '0'
+    assert junit_report.attrib['skipped'] == '0'
+    assert junit_report.attrib['tests'] == '2'
+
+
 def test_unity_test_case_runner(testdir):
     testdir.makepyfile(r"""
         def test_unity_test_case_runner(unity_tester):
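For completeness, a minimal usage sketch combining all three filters; the group
names 'psram' and 'flash', the attribute {'timeout': 60}, and the contents of
the dut fixture's test menu are illustrative assumptions, not values taken from
the unit_test_app fixture:

    def test_filtered_cases(dut):
        # A case runs if it satisfies ANY of the filters:
        #   - it belongs to both 'psram' and 'flash' (illustrative group names), or
        #   - it lies outside 'normal_case' ('!' inverts membership), or
        #   - it is named 'multiple_stages_test', or
        #   - its 'timeout' attribute equals 60 (illustrative attribute).
        dut.run_all_single_board_cases(
            group=['psram&flash', '!normal_case'],
            name='multiple_stages_test',
            attributes={'timeout': 60},
            reset=True,
            timeout=30,
        )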