Skip to content

Commit

Permalink
Update intel-hpl app definition
Browse files Browse the repository at this point in the history
This update includes:

* Ensure intel-hpl is invoked with correct NUMA control. Previously it
  directly invoked the `xhpl_intel64_dynamic` binary without setting the
  `HPL_HOST_NODE` env var. It now calls the `runme_intel64_prv`
  runner script first, which sets up that env var.

* Accommodate the new MKL 2024 directory structure when locating the
  benchmark files.

Tested with both MKL 2023 and MKL 2024.
  • Loading branch information
linsword13 committed Sep 24, 2024
1 parent e7318fb commit 8eaaaad
Showing 1 changed file with 47 additions and 8 deletions.
55 changes: 47 additions & 8 deletions var/ramble/repos/builtin/applications/intel-hpl/application.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,27 +26,66 @@ class IntelHpl(ExecutableApplication):

tags("benchmark-app", "benchmark", "linpack", "optimized", "intel", "mkl")

define_compiler("gcc9", pkg_spec="gcc@9.3.0", package_manager="spack*")
define_compiler("gcc13p2", pkg_spec="gcc@13.2.0", package_manager="spack*")
software_spec(
"imkl_2023p1",
pkg_spec="intel-oneapi-mkl@2023.1.0 threads=openmp",
compiler="gcc9",
"imkl_2024p2",
pkg_spec="intel-oneapi-mkl@2024.2.0 threads=openmp",
compiler="gcc13p2",
package_manager="spack*",
)
software_spec(
"impi_2018", pkg_spec="[email protected]", package_manager="spack*"
"impi2021p11",
pkg_spec="[email protected]",
package_manager="spack*",
)

required_package("intel-oneapi-mkl", package_manager="spack*")

# This step does a few things:
# - Set up the env variables expected by runme_intel64_prv
# (We call this runner script instead of the underlying xhpl_intel64_dynamic
# since it sets up derived env var HPL_HOST_NODE for numa placement control.)
# - Link in the xhpl_intel64_dynamic binary to the running dir
# (This is needed due to runme_intel64_prv invoking it using "./")
# - Account for newer directory layout from mkl 2024
executable(
"prepare",
template=[
r"""
export MPI_PROC_NUM="{n_ranks}"
export MPI_PER_NODE="{processes_per_node}"
export NUMA_PER_MPI="{numa_per_mpi}"
export HPL_EXE=xhpl_intel64_dynamic
hpl_bench_dir="{intel-oneapi-mkl_path}/mkl/latest/benchmarks/mp_linpack"
if [ ! -d ${hpl_bench_dir} ]; then
hpl_bench_dir="{intel-oneapi-mkl_path}/mkl/latest/share/mkl/benchmarks/mp_linpack"
fi
ln -sf ${hpl_bench_dir}/xhpl_intel64_dynamic {experiment_run_dir}/.
hpl_run="${hpl_bench_dir}/runme_intel64_prv"
""".strip()
],
mpi=False,
redirect="",
output_capture="",
)

executable(
"execute",
"{intel-oneapi-mkl_path}/mkl/latest/benchmarks/mp_linpack/xhpl_intel64_dynamic",
"${hpl_run}",
use_mpi=True,
)

workload("standard", executables=["execute"])
workload("calculator", executables=["execute"])
workload("standard", executables=["prepare", "execute"])
workload("calculator", executables=["prepare", "execute"])

workload_variable(
"numa_per_mpi",
description="numa per mpi process",
default="1",
workloads=["*"],
)

# standard workload-specific variables:

workload_variable(
"output_file",
Expand Down

0 comments on commit 8eaaaad

Please sign in to comment.