Skip to content

Commit

Permalink
move determine_number_of_jobs into spack.util.cpus, use it in concret…
Browse files Browse the repository at this point in the history
…ize (spack#37620)
  • Loading branch information
haampie authored Sep 7, 2023
1 parent 4429e17 commit 7bd95f6
Show file tree
Hide file tree
Showing 10 changed files with 86 additions and 49 deletions.
4 changes: 2 additions & 2 deletions lib/spack/spack/bootstrap/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,9 @@

from llnl.util import tty

import spack.build_environment
import spack.environment
import spack.tengine
import spack.util.cpus
import spack.util.executable
from spack.environment import depfile

Expand Down Expand Up @@ -137,7 +137,7 @@ def _install_with_depfile(self) -> None:
"-C",
str(self.environment_root()),
"-j",
str(spack.build_environment.determine_number_of_jobs(parallel=True)),
str(spack.util.cpus.determine_number_of_jobs(parallel=True)),
**kwargs,
)

Expand Down
35 changes: 1 addition & 34 deletions lib/spack/spack/build_environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import spack_install_test_log
from spack.installer import InstallError
from spack.util.cpus import cpus_available
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import (
SYSTEM_DIRS,
EnvironmentModifications,
Expand Down Expand Up @@ -537,39 +537,6 @@ def update_compiler_args_for_dep(dep):
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))


def determine_number_of_jobs(
    parallel=False, command_line=None, config_default=None, max_cpus=None
):
    """Return the number of build jobs to use.

    Packages that require sequential builds need 1 job. Otherwise we use the
    number of jobs set on the command line. If not set, then we use the config
    defaults (which is usually set through the builtin config scope), but we
    cap to the number of CPUs available to avoid oversubscription.

    Parameters:
        parallel (bool or None): true when package supports parallel builds
        command_line (int or None): command line override
        config_default (int or None): config default number of jobs
        max_cpus (int or None): maximum number of CPUs available. When None, this
            value is automatically determined.
    """
    if not parallel:
        return 1

    # Only consult the command_line config scope when it actually exists;
    # an explicitly passed command_line argument takes precedence.
    if command_line is None and "command_line" in spack.config.scopes():
        command_line = spack.config.get("config:build_jobs", scope="command_line")

    if command_line is not None:
        return command_line

    # NOTE(review): `or` treats 0 the same as None here; presumably 0 is not
    # a meaningful CPU count, so this is intentional.
    max_cpus = max_cpus or cpus_available()

    # in some rare cases _builtin config may not be set, so default to max 16
    config_default = config_default or spack.config.get("config:build_jobs", 16)

    return min(max_cpus, config_default)


def set_module_variables_for_package(pkg):
"""Populate the Python module of a package with some useful global names.
This makes things easier for package writers.
Expand Down
5 changes: 3 additions & 2 deletions lib/spack/spack/build_systems/racket.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,10 @@
import llnl.util.tty as tty

import spack.builder
from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
from spack.build_environment import SPACK_NO_PARALLEL_MAKE
from spack.directives import build_system, extends, maintainers
from spack.package_base import PackageBase
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import env_flag
from spack.util.executable import Executable, ProcessError

Expand Down Expand Up @@ -92,7 +93,7 @@ def install(self, pkg, spec, prefix):
"--copy",
"-i",
"-j",
str(determine_number_of_jobs(parallel)),
str(determine_number_of_jobs(parallel=parallel)),
"--",
os.getcwd(),
]
Expand Down
2 changes: 1 addition & 1 deletion lib/spack/spack/environment/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -1504,7 +1504,7 @@ def _concretize_separately(self, tests=False):
start = time.time()
max_processes = min(
len(arguments), # Number of specs
spack.config.get("config:build_jobs"), # Cap on build jobs
spack.util.cpus.determine_number_of_jobs(parallel=True),
)

# TODO: revisit this print as soon as darwin is parallel too
Expand Down
1 change: 1 addition & 0 deletions lib/spack/spack/package.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@
on_package_attributes,
)
from spack.spec import InvalidSpecDetected, Spec
from spack.util.cpus import determine_number_of_jobs
from spack.util.executable import *
from spack.variant import (
any_combination_of,
Expand Down
49 changes: 42 additions & 7 deletions lib/spack/spack/test/build_environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,9 @@
import spack.package_base
import spack.spec
import spack.util.spack_yaml as syaml
from spack.build_environment import _static_to_shared_library, determine_number_of_jobs, dso_suffix
from spack.build_environment import _static_to_shared_library, dso_suffix
from spack.paths import build_env_path
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
from spack.util.path import Path, convert_to_platform_path
Expand Down Expand Up @@ -442,7 +443,7 @@ def test_parallel_false_is_not_propagating(default_mock_concretization):

spack.build_environment.set_module_variables_for_package(s["b"].package)
assert s["b"].package.module.make_jobs == spack.build_environment.determine_number_of_jobs(
s["b"].package.parallel
parallel=s["b"].package.parallel
)


Expand Down Expand Up @@ -474,28 +475,62 @@ def test_setting_dtags_based_on_config(config_setting, expected_flag, config, mo

def test_build_jobs_sequential_is_sequential():
    # A package that does not support parallel builds always gets exactly one
    # job, no matter what command line or config scopes request.
    # (The flattened diff had left the pre-change one-line assertion in place
    # alongside the new call, which is not valid Python; only the new form
    # belongs here.)
    assert (
        determine_number_of_jobs(
            parallel=False,
            max_cpus=8,
            config=spack.config.Configuration(
                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 8}}),
                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 8}}),
            ),
        )
        == 1
    )


def test_build_jobs_command_line_overrides():
    # The command_line scope wins over the defaults scope, both when it asks
    # for more jobs than CPUs are available (no cap applies to an explicit
    # command line value) and when it asks for fewer.
    # (The flattened diff had left the pre-change one-line assertions in place
    # alongside the new calls, which is not valid Python; only the new forms
    # belong here.)
    assert (
        determine_number_of_jobs(
            parallel=True,
            max_cpus=1,
            config=spack.config.Configuration(
                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}}),
            ),
        )
        == 10
    )
    assert (
        determine_number_of_jobs(
            parallel=True,
            max_cpus=100,
            config=spack.config.Configuration(
                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}}),
            ),
        )
        == 10
    )


def test_build_jobs_defaults():
    # Without a command_line scope the defaults scope applies, capped at the
    # number of available CPUs (1 <= 10 passes through; 100 is capped to 10).
    # (The flattened diff had left the pre-change one-line assertions in place
    # alongside the new calls, which is not valid Python; only the new forms
    # belong here.)
    assert (
        determine_number_of_jobs(
            parallel=True,
            max_cpus=10,
            config=spack.config.Configuration(
                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}})
            ),
        )
        == 1
    )
    assert (
        determine_number_of_jobs(
            parallel=True,
            max_cpus=10,
            config=spack.config.Configuration(
                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}})
            ),
        )
        == 10
    )

Expand Down
36 changes: 36 additions & 0 deletions lib/spack/spack/util/cpus.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@

import multiprocessing
import os
from typing import Optional

import spack.config


def cpus_available():
Expand All @@ -18,3 +21,36 @@ def cpus_available():
return len(os.sched_getaffinity(0)) # novermin
except Exception:
return multiprocessing.cpu_count()


def determine_number_of_jobs(
    *,
    parallel: bool = False,
    max_cpus: Optional[int] = None,
    config: Optional["spack.config.Configuration"] = None,
) -> int:
    """Return the number of build jobs to use.

    Packages that require sequential builds need 1 job. Otherwise we use the
    number of jobs set on the command line. If not set, then we use the config
    defaults (which is usually set through the builtin config scope), but we
    cap to the number of CPUs available to avoid oversubscription.

    Parameters:
        parallel: true when package supports parallel builds
        max_cpus: maximum number of CPUs to use. When None, determined from
            the CPUs currently available to this process at call time.
        config: configuration object (defaults to global config)
    """
    if not parallel:
        return 1

    # Evaluate the CPU count lazily per call. Using ``cpus_available()`` as
    # the parameter default would freeze the value at import time and ignore
    # later changes to the process CPU affinity.
    if max_cpus is None:
        max_cpus = cpus_available()

    cfg = config or spack.config.CONFIG

    # A job count set on the command line overrides everything else and is
    # deliberately not capped by max_cpus.
    try:
        command_line = cfg.get("config:build_jobs", default=None, scope="command_line")
        if command_line is not None:
            return command_line
    except ValueError:
        # The given configuration has no command_line scope at all.
        pass

    # In some rare cases the builtin config may not be set; default to 16.
    return min(max_cpus, cfg.get("config:build_jobs", 16))
1 change: 0 additions & 1 deletion var/spack/repos/builtin/packages/gmake/package.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
import os
import re

from spack.build_environment import MakeExecutable, determine_number_of_jobs
from spack.package import *


Expand Down
1 change: 0 additions & 1 deletion var/spack/repos/builtin/packages/ninja-fortran/package.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.build_environment import MakeExecutable, determine_number_of_jobs
from spack.package import *
from spack.util.executable import which_string

Expand Down
1 change: 0 additions & 1 deletion var/spack/repos/builtin/packages/ninja/package.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys

from spack.build_environment import MakeExecutable, determine_number_of_jobs
from spack.package import *
from spack.util.executable import which_string

Expand Down

0 comments on commit 7bd95f6

Please sign in to comment.