From 30ab5fcb07fd928fd767be40a78afb329ebd17db Mon Sep 17 00:00:00 2001 From: Simeon Ehrig Date: Wed, 14 Feb 2024 13:57:59 +0100 Subject: [PATCH] add example to demonstrate the usage of bashi - use example as integration test in the CI - add globals ON and OFF as string equivalent of ON_VER and OFF_VER --- .github/workflows/linters.yml | 21 ++- .github/workflows/testDeploy.yml | 28 ++-- .gitignore | 6 + README.md | 4 + bashi/globals.py | 6 +- bashi/validate.py | 4 +- bashi/versions.py | 7 +- example/example.py | 232 ++++++++++++++++++++++++++ tests/test_filter_compiler_name.py | 4 +- tests/test_filter_compiler_version.py | 4 +- tests/test_nvcc_filter.py | 4 +- 11 files changed, 291 insertions(+), 29 deletions(-) create mode 100644 example/example.py diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index b875da7..895517d 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -1,6 +1,17 @@ name: lint-code on: [push, pull_request] jobs: + formatter: + name: runner black code formatter + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: psf/black@stable + with: + options: "--check --verbose" + src: "./" + version: "~= 24.0" + mypy-linter: name: run mypy linter runs-on: ubuntu-latest @@ -17,9 +28,12 @@ jobs: - name: Install mypy run: | pip install mypy - - name: Run mypy + - name: Run mypy on bashi source code run: | mypy bashi + - name: Run mypy on example + run: | + mypy example pylint-linter: name: run pylint linter @@ -37,6 +51,9 @@ jobs: - name: Install pylint run: | pip install pylint - - name: Run pylint + - name: Run pylint on bashi source code run: | pylint bashi + - name: Run pylint on example + run: | + pylint example/example.py diff --git a/.github/workflows/testDeploy.yml b/.github/workflows/testDeploy.yml index 26f3e0d..f856641 100644 --- a/.github/workflows/testDeploy.yml +++ b/.github/workflows/testDeploy.yml @@ -1,20 +1,8 @@ name: test-and-deploy on: [push, pull_request] jobs: - formatter: - name: runner black code formatter - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: psf/black@stable - with: - options: "--check --verbose" - src: "./" - version: "~= 24.0" - unit-tests: runs-on: ubuntu-latest - needs: formatter strategy: matrix: python-version: ['3.10', '3.11', '3.12'] @@ -31,3 +19,19 @@ jobs: pip install . - name: run unit tests run: python -m unittest discover -s tests + integration-tests: + needs: unit-tests + runs-on: ubuntu-latest + name: Run example/example.py as integration test + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: 3.12 + architecture: x64 + - name: Install bashi + run: | + pip install . + - name: run integration test + run: python example/example.py diff --git a/.gitignore b/.gitignore index 17fb81d..f996f45 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,12 @@ env # coverage report generated by coverage.py .coverage +#################### +# example output +#################### + +example/job.yaml + #################### # IDE files #################### diff --git a/README.md b/README.md index f0f5c54..1f48f5a 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,10 @@ A library to provide a job generator for CI's for alpaka based projects. The main component of the `bashi` library is combinatorics. Due to the wide spread of the field, there are different words for the same things. 
Therefore, `bashi` has introduced a naming guideline that is used for function and parameter names and documentation. Please read the [naming guidelines](docs/naming.md). +# Example + +An example of the use of the `bashi` library can be found in [example/example.py](example/example.py). It shows how to use the library to create a `combination-list` from a `parameter-value-matrix`. The example also uses a custom filter. For more details, please read the module documentation of [example/example.py](example/example.py). + # Developing It is strongly recommended to use a Python environment for developing the code, such as `virtualenv` or a `conda` environment. The following code uses a `virtualenv`. diff --git a/bashi/globals.py b/bashi/globals.py index be122e6..a914a29 100644 --- a/bashi/globals.py +++ b/bashi/globals.py @@ -45,5 +45,7 @@ BOOST: str = "boost" CXX_STANDARD: str = "cxx_standard" -OFF_VER: packaging.version.Version = packaging.version.parse("0.0.0") -ON_VER: packaging.version.Version = packaging.version.parse("1.0.0") +OFF: str = "0.0.0" +ON: str = "1.0.0" +OFF_VER: packaging.version.Version = packaging.version.parse(OFF) +ON_VER: packaging.version.Version = packaging.version.parse(ON) diff --git a/bashi/validate.py b/bashi/validate.py index e0ff8f2..6080734 100644 --- a/bashi/validate.py +++ b/bashi/validate.py @@ -80,10 +80,10 @@ def __call__( version: str = str(values) if version == "OFF": - version = "0.0.0" + version = OFF if version == "ON": - version = "1.0.0" + version = ON # use parse() function to validate that the version has a valid shape try: diff --git a/bashi/versions.py b/bashi/versions.py index 17c2cc9..1343d7f 100644 --- a/bashi/versions.py +++ b/bashi/versions.py @@ -115,16 +115,13 @@ def is_supported_version(name: ValueName, version: ValueVersion) -> bool: local_versions = VERSIONS.copy() - off: str = "0.0.0" - on: str = "1.0.0" - local_versions[CLANG_CUDA] = local_versions[CLANG] - local_versions[ALPAKA_ACC_GPU_CUDA_ENABLE] = [off] + local_versions[ALPAKA_ACC_GPU_CUDA_ENABLE] = [OFF] local_versions[ALPAKA_ACC_GPU_CUDA_ENABLE] += VERSIONS[NVCC] for backend_name in BACKENDS: if backend_name != ALPAKA_ACC_GPU_CUDA_ENABLE: - local_versions[backend_name] = [off, on] + local_versions[backend_name] = [OFF, ON] for ver in local_versions[name]: if pkv.parse(str(ver)) == version: diff --git a/example/example.py b/example/example.py new file mode 100644 index 0000000..33d7387 --- /dev/null +++ b/example/example.py @@ -0,0 +1,232 @@ +"""The example shows how the bashi library can be used. The example does the following things: + +1. generate a parameter-value-matrix with all software versions supported by bashi +2. generate a combination-list + - the generator uses the bashi filter rules and a custom filter + - The custom filter filters the backend configurations + - either all CPU backends and no GPU backend are activated + - or a single gpu backend is enabled and all other backends are disabled +3. check whether all expected parameter-value pairs are contained in the combination-list + - all pairs that are prohibited by the user-defined filter are removed from the list of + expected parameter-value-pairs +4. 
generate a job.yaml from the combination-list
+"""
+
+from typing import List
+import os
+from bashi.generator import generate_combination_list
+from bashi.utils import (
+    get_expected_bashi_parameter_value_pairs,
+    check_parameter_value_pair_in_combination_list,
+    remove_parameter_value_pair,
+    create_parameter_value_pair,
+)
+from bashi.types import (
+    ParameterValuePair,
+    ParameterValueTuple,
+    ParameterValueMatrix,
+    CombinationList,
+)
+from bashi.globals import *  # pylint: disable=wildcard-import,unused-wildcard-import
+from bashi.versions import get_parameter_value_matrix, VERSIONS
+
+
+# pylint: disable=too-many-branches
+def verify(combination_list: CombinationList, param_value_matrix: ParameterValueMatrix) -> bool:
+    """Check if all expected parameter-value-pairs exist in the combination-list.
+
+    Args:
+        combination_list (CombinationList): The generated combination list.
+        param_value_matrix (ParameterValueMatrix): The expected parameter-value-pairs are generated
+            from this parameter-value-matrix.
+
+    Returns:
+        bool: True if all expected pairs were found
+    """
+    expected_param_val_tuple: List[ParameterValuePair] = get_expected_bashi_parameter_value_pairs(
+        param_value_matrix
+    )
+
+    gpu_backends = set(
+        [
+            ALPAKA_ACC_GPU_CUDA_ENABLE,
+            ALPAKA_ACC_GPU_HIP_ENABLE,
+            ALPAKA_ACC_SYCL_ENABLE,
+        ]
+    )
+
+    # if one of the GPU backends is enabled, all other backends need to be disabled
+    # special case for the CUDA backend: instead of being only ON or OFF, its value is either OFF
+    # or a version number
+    for gpu_backend in gpu_backends:
+        if gpu_backend == ALPAKA_ACC_GPU_CUDA_ENABLE:
+            gpu_versions = VERSIONS[NVCC]
+        else:
+            gpu_versions = [ON]
+        for gpu_version in gpu_versions:
+            for other_backend in set(BACKENDS) - set([gpu_backend]):
+                if other_backend == ALPAKA_ACC_GPU_CUDA_ENABLE:
+                    other_backend_versions = VERSIONS[NVCC]
+                else:
+                    other_backend_versions = [ON]
+
+                for other_backend_version in other_backend_versions:
+
+                    remove_parameter_value_pair(
+                        to_remove=create_parameter_value_pair(
+                            gpu_backend,
+                            gpu_backend,
+                            gpu_version,
+                            other_backend,
+                            other_backend,
+                            other_backend_version,
+                        ),
+                        parameter_value_pairs=expected_param_val_tuple,
+                    )
+                    remove_parameter_value_pair(
+                        to_remove=create_parameter_value_pair(
+                            other_backend,
+                            other_backend,
+                            other_backend_version,
+                            gpu_backend,
+                            gpu_backend,
+                            gpu_version,
+                        ),
+                        parameter_value_pairs=expected_param_val_tuple,
+                    )
+
+    cpu_backends = set(BACKENDS) - gpu_backends
+    # remove all pairs that contain two CPU backends where one of the backends is enabled and the
+    # other is disabled
+    for cpu_backend in cpu_backends:
+        for other_cpu_backend in cpu_backends:
+            if cpu_backend != other_cpu_backend:
+                remove_parameter_value_pair(
+                    to_remove=create_parameter_value_pair(
+                        cpu_backend,
+                        cpu_backend,
+                        ON_VER,
+                        other_cpu_backend,
+                        other_cpu_backend,
+                        OFF_VER,
+                    ),
+                    parameter_value_pairs=expected_param_val_tuple,
+                )
+                remove_parameter_value_pair(
+                    to_remove=create_parameter_value_pair(
+                        other_cpu_backend,
+                        other_cpu_backend,
+                        OFF_VER,
+                        cpu_backend,
+                        cpu_backend,
+                        ON_VER,
+                    ),
+                    parameter_value_pairs=expected_param_val_tuple,
+                )
+
+    return check_parameter_value_pair_in_combination_list(
+        combination_list, expected_param_val_tuple
+    )
+
+
+def custom_filter(row: ParameterValueTuple) -> bool:
+    """Filter function defined by the user. In this case, it removes some backend combinations;
+    see the module documentation.
+
+    Args:
+        row (ParameterValueTuple): parameter-value-tuple
+
+    Returns:
+        bool: True if the tuple is valid
+    """
+    gpu_backends = set(
+        [
+            ALPAKA_ACC_GPU_CUDA_ENABLE,
+            ALPAKA_ACC_GPU_HIP_ENABLE,
+            ALPAKA_ACC_SYCL_ENABLE,
+        ]
+    )
+    for single_gpu_backend in gpu_backends:
+        if single_gpu_backend in row and row[single_gpu_backend].version != OFF_VER:
+            for backend in BACKENDS:
+                if backend != single_gpu_backend:
+                    if backend in row and row[backend].version != OFF_VER:
+                        return False
+
+    cpu_backends = set(BACKENDS) - gpu_backends
+    for cpu_backend in cpu_backends:
+        if cpu_backend in row and row[cpu_backend].version == ON_VER:
+            # all other CPU backends need to be enabled
+            for other_cpu_backend in cpu_backends - set([cpu_backend]):
+                if other_cpu_backend in row and row[other_cpu_backend].version == OFF_VER:
+                    return False
+            # all other GPU backends need to be disabled
+            for gpu_backend in gpu_backends:
+                if gpu_backend in row and row[gpu_backend].version != OFF_VER:
+                    return False
+
+    return True
+
+
+def create_yaml(combination_list: CombinationList):
+    """Create an example GitLab CI job yaml from the combination-list and write it to a file.
+    Normally a YAML library should be used, but for a small example it is better to create the
+    YAML file by hand instead of adding a dependency.
+
+    Args:
+        combination_list (CombinationList): combination-list
+    """
+    job_yaml = ""
+    for job_num, comb in enumerate(combination_list):
+        job_yaml += f"ci_job_{job_num}:\n"
+        job_yaml += "  variables:\n"
+        for param, param_val in comb.items():
+            val_name, val_version = param_val
+            if param == HOST_COMPILER:
+                job_yaml += f"    - HOST_COMPILER_NAME: {val_name}\n"
+                job_yaml += f"    - HOST_COMPILER_VERSION: {val_version}\n"
+            elif param == DEVICE_COMPILER:
+                job_yaml += f"    - DEVICE_COMPILER_NAME: {val_name}\n"
+                job_yaml += f"    - DEVICE_COMPILER_VERSION: {val_version}\n"
+            elif param in BACKENDS:
+                if val_version == ON_VER:
+                    job_yaml += f"    - {val_name.upper()}: ON\n"
+                elif val_version == OFF_VER:
+                    job_yaml += f"    - {val_name.upper()}: OFF\n"
+                else:
+                    job_yaml += f"    - {val_name.upper()}: {val_version}\n"
+            else:
+                job_yaml += f"    - {val_name.upper()}: {val_version}\n"
+        job_yaml += "  script:\n"
+        job_yaml += "    - ./run_tests.sh\n"
+        job_yaml += "  tags:\n"
+        if comb[ALPAKA_ACC_SYCL_ENABLE].version == ON_VER:
+            job_yaml += "    - intel-gpu-runner\n"
+        elif comb[ALPAKA_ACC_GPU_HIP_ENABLE].version == ON_VER:
+            job_yaml += "    - amd-gpu-runner\n"
+        elif comb[ALPAKA_ACC_GPU_CUDA_ENABLE].version != OFF_VER:
+            job_yaml += "    - nvidia-gpu-runner\n"
+        else:
+            job_yaml += "    - cpu-runner\n"
+
+        job_yaml += "\n"
+
+    # generate job.yaml always in the same folder where example.py is located
+    job_yaml_path = os.path.join(os.path.dirname(__file__), "job.yaml")
+    print(f"write GitLab CI job.yaml to {job_yaml_path}")
+    with open(job_yaml_path, "w", encoding="UTF-8") as output:
+        output.write(job_yaml)
+
+
+if __name__ == "__main__":
+    param_matrix = get_parameter_value_matrix()
+
+    comb_list: CombinationList = generate_combination_list(
+        parameter_value_matrix=param_matrix, custom_filter=custom_filter
+    )
+
+    print("verify combination-list")
+    verify(comb_list, param_matrix)
+
+    create_yaml(comb_list)
+    print(f"number of combinations: {len(comb_list)}")
diff --git a/tests/test_filter_compiler_name.py b/tests/test_filter_compiler_name.py
index 86f2b15..93d44a5 100644
--- a/tests/test_filter_compiler_name.py
+++ b/tests/test_filter_compiler_name.py
@@ -50,7 +50,7 @@ def test_valid_combination_rule_n3(self):
                         HOST_COMPILER: 
ppv((CLANG, 10)), DEVICE_COMPILER: ppv((CLANG, 10)), ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, ON) ), CMAKE: ppv((CMAKE, 3.24)), BOOST: ppv((BOOST, 1.78)), @@ -106,7 +106,7 @@ def test_invalid_combination_rule_n3(self): HOST_COMPILER: ppv((GCC, 15)), DEVICE_COMPILER: ppv((CLANG, 10)), ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, ON) ), CMAKE: ppv((CMAKE, 3.24)), BOOST: ppv((BOOST, 1.78)), diff --git a/tests/test_filter_compiler_version.py b/tests/test_filter_compiler_version.py index 44c56a3..25742a4 100644 --- a/tests/test_filter_compiler_version.py +++ b/tests/test_filter_compiler_version.py @@ -49,7 +49,7 @@ def test_valid_combination_rule_v1(self): HOST_COMPILER: ppv((CLANG, 14)), DEVICE_COMPILER: ppv((CLANG, 14)), ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, ON) ), CMAKE: ppv((CMAKE, 3.24)), BOOST: ppv((BOOST, 1.78)), @@ -106,7 +106,7 @@ def test_invalid_combination_rule_v1(self): HOST_COMPILER: ppv((GCC, 15)), DEVICE_COMPILER: ppv((GCC, 10)), ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE, ON) ), CMAKE: ppv((CMAKE, 3.24)), BOOST: ppv((BOOST, 1.78)), diff --git a/tests/test_nvcc_filter.py b/tests/test_nvcc_filter.py index 908131d..e675e40 100644 --- a/tests/test_nvcc_filter.py +++ b/tests/test_nvcc_filter.py @@ -133,7 +133,7 @@ def test_invalid_combination_rule_n2(self): { HOST_COMPILER: ppv((HIPCC, "5.3")), ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE, ON) ), DEVICE_COMPILER: ppv((NVCC, "12.3")), } @@ -178,7 +178,7 @@ def test_valid_combination_rule_n2(self): { HOST_COMPILER: ppv((CLANG, "14")), ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE: ppv( - (ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE, "1.0.0") + (ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE, ON) ), DEVICE_COMPILER: ppv((NVCC, "10.1")), }
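
Note on the new string globals: ON and OFF, added to bashi/globals.py above, are plain string equivalents of the parsed ON_VER/OFF_VER version objects, which is why the test files can pass ON to ppv instead of the magic string "1.0.0". The following minimal sketch only restates that relationship using the definitions from this patch; the assert lines are illustrative and not part of the patch.

import packaging.version

# string form of the switched-off/on backend values (added in bashi/globals.py)
OFF: str = "0.0.0"
ON: str = "1.0.0"
# parsed form, unchanged behavior but now derived from the string constants
OFF_VER: packaging.version.Version = packaging.version.parse(OFF)
ON_VER: packaging.version.Version = packaging.version.parse(ON)

# parsing the string form yields a Version object that compares equal to the
# parsed form, so "1.0.0" and ON are interchangeable in the tests above
assert packaging.version.parse(ON) == ON_VER
assert packaging.version.parse(OFF) == OFF_VER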
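
For a quick start without reading the whole example, here is a minimal, self-contained usage sketch. It reuses the imports and the generate_combination_list() call exactly as they appear in example/example.py above; the always-accepting accept_all filter is a hypothetical stand-in for the backend filter of the example.

from bashi.generator import generate_combination_list
from bashi.types import CombinationList, ParameterValueTuple
from bashi.versions import get_parameter_value_matrix


def accept_all(row: ParameterValueTuple) -> bool:  # pylint: disable=unused-argument
    """Hypothetical custom filter: keep every parameter-value-tuple."""
    return True


if __name__ == "__main__":
    param_matrix = get_parameter_value_matrix()
    comb_list: CombinationList = generate_combination_list(
        parameter_value_matrix=param_matrix, custom_filter=accept_all
    )
    print(f"number of combinations: {len(comb_list)}")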