Skip to content

Commit

Permalink
Add no cover for dpnp.is_cuda_backend()
Browse files Browse the repository at this point in the history
  • Loading branch information
vlad-perevezentsev committed Jan 22, 2025
1 parent 2675370 commit f52bbbe
Show file tree
Hide file tree
Showing 8 changed files with 41 additions and 41 deletions.
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface.py
Original file line number Diff line number Diff line change
Expand Up @@ -712,7 +712,7 @@ def is_cuda_backend(obj=None):
if (
sycl_device is not None
and sycl_device.backend == dpctl.backend_type.cuda
):
): # pragma: no cover
return True
return False

Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def choose(x1, choices, out=None, mode="raise"):
)

if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_libmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def erf(in_array1):
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_mathematical.py
Original file line number Diff line number Diff line change
Expand Up @@ -2946,7 +2946,7 @@ def modf(x1, **kwargs):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
4 changes: 2 additions & 2 deletions dpnp/linalg/dpnp_utils_linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,7 @@ def _batched_qr(a, mode="reduced"):
# w/a to avoid race condition on CUDA during multiple runs
# TODO: Remove it once the OneMath issue is resolved
# https://github.com/uxlfoundation/oneMath/issues/626
if dpnp.is_cuda_backend(a_sycl_queue):
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
ht_ev.wait()
else:
_manager.add_event_pair(ht_ev, geqrf_ev)
Expand Down Expand Up @@ -2479,7 +2479,7 @@ def dpnp_qr(a, mode="reduced"):
# w/a to avoid race condition on CUDA during multiple runs
# TODO: Remove it once the OneMath issue is resolved
# https://github.com/uxlfoundation/oneMath/issues/626
if dpnp.is_cuda_backend(a_sycl_queue):
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
ht_ev.wait()
else:
_manager.add_event_pair(ht_ev, geqrf_ev)
Expand Down
62 changes: 31 additions & 31 deletions dpnp/random/dpnp_iface_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def beta(a, b, size=None):
"""

if not use_origin_backend(a):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -191,7 +191,7 @@ def binomial(n, p, size=None):
"""

if not use_origin_backend(n):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -248,7 +248,7 @@ def chisquare(df, size=None):
"""

if not use_origin_backend(df):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -321,7 +321,7 @@ def exponential(scale=1.0, size=None):
"""

if not use_origin_backend(scale):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -358,7 +358,7 @@ def f(dfnum, dfden, size=None):
"""

if not use_origin_backend(dfnum):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -401,7 +401,7 @@ def gamma(shape, scale=1.0, size=None):
"""

if not use_origin_backend(scale):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -444,7 +444,7 @@ def geometric(p, size=None):
"""

if not use_origin_backend(p):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -483,7 +483,7 @@ def gumbel(loc=0.0, scale=1.0, size=None):
"""

if not use_origin_backend(loc):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -526,7 +526,7 @@ def hypergeometric(ngood, nbad, nsample, size=None):
"""

if not use_origin_backend(ngood):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -579,7 +579,7 @@ def laplace(loc=0.0, scale=1.0, size=None):
"""

if not use_origin_backend(loc):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -618,7 +618,7 @@ def logistic(loc=0.0, scale=1.0, size=None):
"""

if not use_origin_backend(loc):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -664,7 +664,7 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
"""

if not use_origin_backend(mean):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -726,7 +726,7 @@ def multinomial(n, pvals, size=None):
pvals_sum = sum(pvals)
pvals_desc = dpnp.get_dpnp_descriptor(dpnp.array(pvals))
d = len(pvals)
if dpnp.is_cuda_backend(pvals_desc.get_array()):
if dpnp.is_cuda_backend(pvals_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -780,7 +780,7 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
cov_ = dpnp.get_dpnp_descriptor(dpnp.array(cov, dtype=dpnp.float64))
if dpnp.is_cuda_backend(mean_.get_array()) or dpnp.is_cuda_backend(
cov_.get_array()
):
): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -839,7 +839,7 @@ def negative_binomial(n, p, size=None):
"""

if not use_origin_backend(n):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -929,7 +929,7 @@ def noncentral_chisquare(df, nonc, size=None):
"""

if not use_origin_backend(df):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -988,7 +988,7 @@ def pareto(a, size=None):
"""

if not use_origin_backend(a):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1062,7 +1062,7 @@ def poisson(lam=1.0, size=None):
"""

if not use_origin_backend(lam):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1102,7 +1102,7 @@ def power(a, size=None):
"""

if not use_origin_backend(a):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1524,7 +1524,7 @@ def rayleigh(scale=1.0, size=None):
"""

if not use_origin_backend(scale):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1606,7 +1606,7 @@ def shuffle(x1):
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False)
if x1_desc:

if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1655,7 +1655,7 @@ def seed(seed=None, device=None, sycl_queue=None):
)

if not use_origin_backend(seed):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1700,7 +1700,7 @@ def standard_cauchy(size=None):
"""

if not use_origin_backend(size):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1729,7 +1729,7 @@ def standard_exponential(size=None):
"""

if not use_origin_backend(size):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1761,7 +1761,7 @@ def standard_gamma(shape, size=None):
"""

if not use_origin_backend(shape):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1844,7 +1844,7 @@ def standard_t(df, size=None):
"""

if not use_origin_backend(df):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1885,7 +1885,7 @@ def triangular(left, mode, right, size=None):
"""

if not use_origin_backend(left):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -1998,7 +1998,7 @@ def vonmises(mu, kappa, size=None):
"""

if not use_origin_backend(mu):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -2039,7 +2039,7 @@ def wald(mean, scale, size=None):
"""

if not use_origin_backend(mean):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -2080,7 +2080,7 @@ def weibull(a, size=None):
"""

if not use_origin_backend(a):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -2117,7 +2117,7 @@ def zipf(a, size=None):
"""

if not use_origin_backend(a):
if dpnp.is_cuda_backend():
if dpnp.is_cuda_backend(): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
6 changes: 3 additions & 3 deletions dpnp/random/dpnp_random_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ def normal(
"""

if not use_origin_backend():
if dpnp.is_cuda_backend(self):
if dpnp.is_cuda_backend(self): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -369,7 +369,7 @@ def randint(self, low, high=None, size=None, dtype=int, usm_type="device"):
"""

if not use_origin_backend(low):
if dpnp.is_cuda_backend(self):
if dpnp.is_cuda_backend(self): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down Expand Up @@ -598,7 +598,7 @@ def uniform(
"""

if not use_origin_backend():
if dpnp.is_cuda_backend(self):
if dpnp.is_cuda_backend(self): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down

0 comments on commit f52bbbe

Please sign in to comment.