Abstract HPCG and add NVIDIA-HPCG #810

Merged 2 commits on Jan 7, 2025
124 changes: 13 additions & 111 deletions var/ramble/repos/builtin/applications/hpcg/application.py
@@ -6,20 +6,26 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
-import os
 from ramble.appkit import *
-from ramble.expander import Expander
+from ramble.base_app.builtin.hpcg import Hpcg as BaseHpcg
 
 
-class Hpcg(ExecutableApplication):
-    """Define HPCG application"""
+class Hpcg(BaseHpcg):
+    """The High Performance Conjugate Gradients (HPCG) Benchmark project is an
+    effort to create a new metric for ranking HPC systems. HPCG is intended as
+    a complement to the High Performance LINPACK (HPL) benchmark, currently
+    used to rank the TOP500 computing systems. The computational and data
+    access patterns of HPL are still representative of some important scalable
+    applications, but not all. HPCG is designed to exercise computational and
+    data access patterns that more closely match a different and broad set of
+    important applications, and to give incentive to computer system designers
+    to invest in capabilities that will have impact on the collective
+    performance of these applications."""
 
     name = "hpcg"
 
     maintainers("douglasjacobsen")
 
-    tags("benchmark-app", "mini-app", "benchmark")
-
     define_compiler("gcc9", pkg_spec="[email protected]", package_manager="spack*")
 
     software_spec(
@@ -35,110 +41,6 @@ class Hpcg(ExecutableApplication):
 
     required_package("hpcg", package_manager="spack*")
 
-    executable("execute", "xhpcg", use_mpi=True)
-
-    executable("move-log", "mv HPCG-Benchmark*.txt {out_file}", use_mpi=False)
-
-    workload("standard", executables=["execute", "move-log"])
-
-    workload_variable(
-        "matrix_size",
-        default="104 104 104",
-        description="Dimensions of the matrix to use",
-        workloads=["standard"],
-    )
-
-    workload_variable(
-        "iterations",
-        default="60",
-        description="Number of iterations to perform",
-        workloads=["standard"],
-    )
-
-    workload_variable(
-        "out_file",
-        default="{experiment_run_dir}/hpcg_result.out",
-        description="Output file for results",
-        workloads=["standard"],
-    )
-
-    log_str = Expander.expansion_str("out_file")
-
-    figure_of_merit(
-        "Status",
-        log_file=log_str,
-        fom_regex=r"Final Summary::HPCG result is (?P<status>[a-zA-Z]+) with a GFLOP/s rating of=(?P<gflops>[0-9]+\.[0-9]+)",
-        group_name="status",
-        units="",
-    )
-
-    figure_of_merit(
-        "Gflops",
-        log_file=log_str,
-        fom_regex=r"Final Summary::HPCG result is (?P<status>[a-zA-Z]+) with a GFLOP/s rating of=(?P<gflops>[0-9]+\.[0-9]+)",
-        group_name="gflops",
-        units="GFLOP/s",
-    )
-
-    figure_of_merit(
-        "Time",
-        log_file=log_str,
-        fom_regex=r"Final Summary::Results are.* execution time.*is=(?P<exec_time>[0-9]+\.[0-9]*)",
-        group_name="exec_time",
-        units="s",
-    )
-
-    figure_of_merit(
-        "ComputeDotProductMsg",
-        log_file=log_str,
-        fom_regex=r"Final Summary::Reference version of ComputeDotProduct used.*=(?P<msg>.*)",
-        group_name="msg",
-        units="",
-    )
-
-    figure_of_merit(
-        "ComputeSPMVMsg",
-        log_file=log_str,
-        fom_regex=r"Final Summary::Reference version of ComputeSPMV used.*=(?P<msg>.*)",
-        group_name="msg",
-        units="",
-    )
-
-    figure_of_merit(
-        "ComputeMGMsg",
-        log_file=log_str,
-        fom_regex=r"Final Summary::Reference version of ComputeMG used.*=(?P<msg>.*)",
-        group_name="msg",
-        units="",
-    )
-
-    figure_of_merit(
-        "ComputeWAXPBYMsg",
-        log_file=log_str,
-        fom_regex=r"Final Summary::Reference version of ComputeWAXPBY used.*=(?P<msg>.*)",
-        group_name="msg",
-        units="",
-    )
-
-    figure_of_merit(
-        "HPCG 2.4 Rating",
-        log_file=log_str,
-        fom_regex=r"Final Summary::HPCG 2\.4 rating.*=(?P<rating>[0-9]+\.*[0-9]*)",
-        group_name="rating",
-        units="",
-    )
-
-    def _make_experiments(self, workspace, app_inst=None):
-        super()._make_experiments(workspace)
-
-        input_path = os.path.join(
-            self.expander.expand_var_name("experiment_run_dir"), "hpcg.dat"
-        )
-
-        with open(input_path, "w+") as f:
-            f.write("HPCG benchmark input file\n")
-            f.write(
-                "Sandia National Laboratories; University of Tennessee, Knoxville\n"
-            )
-            f.write(self.expander.expand_var_name("matrix_size") + "\n")
-            f.write(self.expander.expand_var_name("iterations") + "\n")
+    workload_group("all_workloads", workloads=["standard"], mode="append")
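Everything deleted above (the executables, the workload variables, the figures of merit, the tags, and the hpcg.dat generation in _make_experiments) presumably moves into the new shared base app, ramble.base_app.builtin.hpcg, which both hpcg and nvidia-hpcg now subclass but which is not included in this diff. Below is a minimal sketch of what that base class likely looks like, reconstructed from the deleted lines; the exact file in the PR may differ (in particular the workload_group signature and which executables live in the base are assumptions):

# Hypothetical sketch of ramble/base_app/builtin/hpcg.py, assembled from
# the code deleted above; NOT part of this diff.
import os

from ramble.appkit import *
from ramble.expander import Expander


class Hpcg(ExecutableApplication):
    """Shared HPCG definitions, inherited by hpcg and nvidia-hpcg"""

    name = "hpcg"

    tags("benchmark-app", "mini-app", "benchmark")

    # nvidia-hpcg overrides "execute" and redefines the "standard" workload
    executable("execute", "xhpcg", use_mpi=True)

    executable("move-log", "mv HPCG-Benchmark*.txt {out_file}", use_mpi=False)

    workload("standard", executables=["execute", "move-log"])

    # Subclasses extend this group with mode="append" (assumed)
    workload_group("all_workloads", workloads=["standard"])

    workload_variable(
        "matrix_size",
        default="104 104 104",
        description="Dimensions of the matrix to use",
        workload_group="all_workloads",
    )

    workload_variable(
        "iterations",
        default="60",
        description="Number of iterations to perform",
        workload_group="all_workloads",
    )

    workload_variable(
        "out_file",
        default="{experiment_run_dir}/hpcg_result.out",
        description="Output file for results",
        workload_group="all_workloads",
    )

    log_str = Expander.expansion_str("out_file")

    figure_of_merit(
        "Gflops",
        log_file=log_str,
        fom_regex=r"Final Summary::HPCG result is (?P<status>[a-zA-Z]+) with a GFLOP/s rating of=(?P<gflops>[0-9]+\.[0-9]+)",
        group_name="gflops",
        units="GFLOP/s",
    )
    # ... plus the Status, Time, Compute*Msg, and HPCG 2.4 rating figures
    # of merit, exactly as deleted above ...

    def _make_experiments(self, workspace, app_inst=None):
        # Write the hpcg.dat input consumed by xhpcg (and by hpcg.sh --dat)
        super()._make_experiments(workspace)

        input_path = os.path.join(
            self.expander.expand_var_name("experiment_run_dir"), "hpcg.dat"
        )

        with open(input_path, "w+") as f:
            f.write("HPCG benchmark input file\n")
            f.write(
                "Sandia National Laboratories; University of Tennessee, Knoxville\n"
            )
            f.write(self.expander.expand_var_name("matrix_size") + "\n")
            f.write(self.expander.expand_var_name("iterations") + "\n")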
149 changes: 149 additions & 0 deletions var/ramble/repos/builtin/applications/nvidia-hpcg/application.py
@@ -0,0 +1,149 @@
# Copyright 2022-2025 The Ramble Authors
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

from ramble.appkit import *
from ramble.base_app.builtin.hpcg import Hpcg as BaseHpcg


class NvidiaHpcg(BaseHpcg):
    """NVIDIA's HPCG benchmark accelerates the High Performance Conjugate
    Gradients (HPCG) Benchmark. HPCG is a software package that performs a
    fixed number of multigrid preconditioned (using a symmetric Gauss-Seidel
    smoother) conjugate gradient (PCG) iterations using double precision
    (64-bit) floating point values."""

    name = "nvidia-hpcg"

    maintainers("douglasjacobsen")

    executable(
        "execute",
        "./hpcg.sh --dat {experiment_run_dir}/hpcg.dat",
        use_mpi=True,
    )

    workload("standard", executables=["execute"])

    workload_group("all_workloads", workloads=["standard"], mode="append")

    workload_variable(
        "nvshmem_disable_cuda_vmm",
        default="1",
        description="Whether to disable CUDA VMM in NVSHMEM",
        workload_group="all_workloads",
    )
    environment_variable(
        "NVSHMEM_DISABLE_CUDA_VMM",
        "{nvshmem_disable_cuda_vmm}",
        description="Whether to disable CUDA VMM in NVSHMEM",
        workload_group="all_workloads",
    )

    workload_variable(
        "hpl_fct_comm_policy",
        default="1",
        description="Communication policy for HPL factorization",
        workload_group="all_workloads",
    )
    environment_variable(
        "HPL_FCT_COMM_POLICY",
        "{hpl_fct_comm_policy}",
        description="Communication policy for HPL factorization",
        workload_group="all_workloads",
    )

    workload_variable(
        "hpl_use_nvshmem",
        default="0",
        description="Whether to use NVSHMEM or not",
        workload_group="all_workloads",
    )
    environment_variable(
        "HPL_USE_NVSHMEM",
        "{hpl_use_nvshmem}",
        description="Whether or not to use NVSHMEM",
        workload_group="all_workloads",
    )

    workload_variable(
        "hpl_p2p_as_bcast",
        default="0",
        description="0 = ncclBcast, 1 = ncclSend/Recv",
        workload_group="all_workloads",
    )
    environment_variable(
        "HPL_P2P_AS_BCAST",
        "{hpl_p2p_as_bcast}",
        description="Whether or not to use P2P for BCAST",
        workload_group="all_workloads",
    )

    workload_variable(
        "pmix_mca_gds",
        default="^ds12",
        description="PMIX MCA gds",
        workload_group="all_workloads",
    )
    environment_variable(
        "PMIX_MCA_gds",
        "{pmix_mca_gds}",
        description="PMIX MCA gds",
        workload_group="all_workloads",
    )

    workload_variable(
        "ompi_mca_btl",
        default="^vader,tcp,openib,uct",
        description="OpenMPI MCA btl",
        workload_group="all_workloads",
    )
    environment_variable(
        "OMPI_MCA_btl",
        "{ompi_mca_btl}",
        description="OpenMPI MCA btl",
        workload_group="all_workloads",
    )

    workload_variable(
        "ompi_mca_pml",
        default="ucx",
        description="OpenMPI MCA pml",
        workload_group="all_workloads",
    )
    environment_variable(
        "OMPI_MCA_pml",
        "{ompi_mca_pml}",
        description="OpenMPI MCA pml",
        workload_group="all_workloads",
    )

    workload_variable(
        "ucx_net_devices",
        default="enp6s0,enp12s0,enp134s0,enp140s0",
        description="UCX Net Devices",
        workload_group="all_workloads",
    )
    environment_variable(
        "UCX_NET_DEVICES",
        "{ucx_net_devices}",
        description="UCX Net Devices",
        workload_group="all_workloads",
    )

    workload_variable(
        "ucx_max_rndv_rails",
        default="4",
        description="UCX Maximum RNDV Rails",
        workload_group="all_workloads",
    )
    environment_variable(
        "UCX_MAX_RNDV_RAILS",
        "{ucx_max_rndv_rails}",
        description="UCX Maximum RNDV Rails",
        workload_group="all_workloads",
    )
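Both applications consume the same hpcg.dat input file, written into the experiment run directory by the base app's _make_experiments (reconstructed in the sketch above from the deleted hpcg code) and passed here via ./hpcg.sh --dat {experiment_run_dir}/hpcg.dat. With the default matrix_size and iterations, the generated file would read:

HPCG benchmark input file
Sandia National Laboratories; University of Tennessee, Knoxville
104 104 104
60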