Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Clean up NVIDIA HPC Benchmark application definitions #836

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 5 additions & 119 deletions var/ramble/repos/builtin/applications/nvidia-hpcg/application.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,12 @@

from ramble.appkit import *
from ramble.base_app.builtin.hpcg import Hpcg as BaseHpcg
from ramble.base_app.builtin.nvidia_hpc_benchmarks import (
NvidiaHpcBenchmarks as NvidiaHpcBase,
)


class NvidiaHpcg(BaseHpcg):
class NvidiaHpcg(BaseHpcg, NvidiaHpcBase):
"""NVIDIA's HPCG benchmark accelerates the High Performance Conjugate
Gradients (HPCG) Benchmark. HPCG is a software package that performs a
fixed number of multigrid preconditioned (using a symmetric Gauss-Seidel
Expand All @@ -23,127 +26,10 @@ class NvidiaHpcg(BaseHpcg):

executable(
"execute",
"./hpcg.sh --dat {experiment_run_dir}/hpcg.dat",
"{internal_mpi_command} /workspace/hpcg.sh --dat {experiment_run_dir}/hpcg.dat",
use_mpi=True,
)

workload("standard", executables=["execute"])

workload_group("all_workloads", workloads=["standard"], mode="append")

workload_variable(
"nvshmem_disable_cuda_vmm",
default="1",
description="",
workload_group="all_workloads",
)
environment_variable(
"NVSHMEM_DISABLE_CUDA_VMM",
"{nvshmem_disable_cuda_vmm}",
description="",
workload_group="all_workloads",
)

workload_variable(
"hpl_fct_comm_policy",
default="1",
description="",
workload_group="all_workloads",
)
environment_variable(
"HPL_FCT_COMM_POLICY",
"{hpl_fct_comm_policy}",
description="",
workload_group="all_workloads",
)

workload_variable(
"hpl_use_nvshmem",
default="0",
description="Whether to use NVSHMEM or not",
workload_group="all_workloads",
)
environment_variable(
"HPL_USE_NVSHMEM",
"{hpl_use_nvshmem}",
description="Whether or not to use NVSHMEM",
workload_group="all_workloads",
)

workload_variable(
"hpl_p2p_as_bcast",
default="0",
description="0 = ncclBcast, 1 = ncclSend/Recv",
workload_group="all_workloads",
)
environment_variable(
"HPL_P2P_AS_BCAST",
"{hpl_p2p_as_bcast}",
description="Whether or not to use P2P for BCAST",
workload_group="all_workloads",
)

workload_variable(
"pmix_mca_gds",
default="^ds12",
description="",
workload_group="all_workloads",
)
environment_variable(
"PMIX_MCA_gds",
"{pmix_mca_gds}",
description="PMIX MCA gds",
workload_group="all_workloads",
)

workload_variable(
"ompi_mca_btl",
default="^vader,tcp,openib,uct",
description="",
workload_group="all_workloads",
)
environment_variable(
"OMPI_MCA_btl",
"{ompi_mca_btl}",
description="OpenMPI MCA btl",
workload_group="all_workloads",
)

workload_variable(
"ompi_mca_pml",
default="ucx",
description="",
workload_group="all_workloads",
)
environment_variable(
"OMPI_MCA_pml",
"{ompi_mca_pml}",
description="OpenMPI MCA pml",
workload_group="all_workloads",
)

workload_variable(
"ucx_net_devices",
default="enp6s0,enp12s0,enp134s0,enp140s0",
description="",
workload_group="all_workloads",
)
environment_variable(
"UCX_NET_DEVICES",
"{ucx_net_devices}",
description="UCX Net Devices",
workload_group="all_workloads",
)

workload_variable(
"ucx_max_rndv_rails",
default="4",
description="",
workload_group="all_workloads",
)
environment_variable(
"UCX_MAX_RNDV_RAILS",
"{ucx_max_rndv_rails}",
    description="UCX Maximum RNDV Rails",
workload_group="all_workloads",
)
124 changes: 5 additions & 119 deletions var/ramble/repos/builtin/applications/nvidia-hpl-mxp/application.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,12 @@
from ramble.appkit import *

from ramble.base_app.builtin.hpl import Hpl as HplBase
from ramble.base_app.builtin.nvidia_hpc_benchmarks import (
NvidiaHpcBenchmarks as NvidiaHpcBase,
)


class NvidiaHplMxp(HplBase):
class NvidiaHplMxp(HplBase, NvidiaHpcBase):
"""This application defines how to run NVIDIA's optimized version of HPL,
which is contained in NVIDIA's HPC-Benchmarks collection.

Expand All @@ -36,7 +39,7 @@ class NvidiaHplMxp(HplBase):

executable(
"execute",
'/workspace/hpl-mxp.sh --gpu-affinity "{gpu_affinity}" --n {Ns} --nb {block_size} --nprow {Ps} --npcol {Qs} --nporder {nporder}',
'{internal_mpi_command} /workspace/hpl-mxp.sh --gpu-affinity "{gpu_affinity}" --n {Ns} --nb {block_size} --nprow {Ps} --npcol {Qs} --nporder {nporder}',
use_mpi=True,
)

Expand All @@ -50,123 +53,6 @@ class NvidiaHplMxp(HplBase):
workloads=["standard", "calculator"],
)

workload_variable(
"nvshmem_disable_cuda_vmm",
default="1",
description="",
workload_group="all_workloads",
)
environment_variable(
"NVSHMEM_DISABLE_CUDA_VMM",
"{nvshmem_disable_cuda_vmm}",
description="",
workload_group="all_workloads",
)

workload_variable(
"hpl_fct_comm_policy",
default="1",
description="",
workload_group="all_workloads",
)
environment_variable(
"HPL_FCT_COMM_POLICY",
"{hpl_fct_comm_policy}",
description="",
workload_group="all_workloads",
)

workload_variable(
"hpl_use_nvshmem",
default="0",
description="Whether to use NVSHMEM or not",
workload_group="all_workloads",
)
environment_variable(
"HPL_USE_NVSHMEM",
"{hpl_use_nvshmem}",
description="Whether or not to use NVSHMEM",
workload_group="all_workloads",
)

workload_variable(
"hpl_p2p_as_bcast",
default="0",
description="0 = ncclBcast, 1 = ncclSend/Recv",
workload_group="all_workloads",
)
environment_variable(
"HPL_P2P_AS_BCAST",
"{hpl_p2p_as_bcast}",
description="Whether or not to use P2P for BCAST",
workload_group="all_workloads",
)

workload_variable(
"pmix_mca_gds",
default="^ds12",
description="",
workload_group="all_workloads",
)
environment_variable(
"PMIX_MCA_gds",
"{pmix_mca_gds}",
description="PMIX MCA gds",
workload_group="all_workloads",
)

workload_variable(
"ompi_mca_btl",
default="^vader,tcp,openib,uct",
description="",
workload_group="all_workloads",
)
environment_variable(
"OMPI_MCA_btl",
"{ompi_mca_btl}",
description="OpenMPI MCA btl",
workload_group="all_workloads",
)

workload_variable(
"ompi_mca_pml",
default="ucx",
description="",
workload_group="all_workloads",
)
environment_variable(
"OMPI_MCA_pml",
"{ompi_mca_pml}",
description="OpenMPI MCA pml",
workload_group="all_workloads",
)

workload_variable(
"ucx_net_devices",
default="enp6s0,enp12s0,enp134s0,enp140s0",
description="",
workload_group="all_workloads",
)
environment_variable(
"UCX_NET_DEVICES",
"{ucx_net_devices}",
description="UCX Net Devices",
workload_group="all_workloads",
)

workload_variable(
"ucx_max_rndv_rails",
default="4",
description="",
workload_group="all_workloads",
)
environment_variable(
"UCX_MAX_RNDV_RAILS",
"{ucx_max_rndv_rails}",
    description="UCX Maximum RNDV Rails",
workload_group="all_workloads",
)

workload_variable(
"block_size",
default="1024",
Expand Down
Loading
Loading