diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py
index a852d1c7fd8..d9756c316c6 100644
--- a/dpnp/dpnp_iface.py
+++ b/dpnp/dpnp_iface.py
@@ -712,7 +712,7 @@ def is_cuda_backend(obj=None):
     if (
         sycl_device is not None
         and sycl_device.backend == dpctl.backend_type.cuda
-    ):
+    ):  # pragma: no cover
         return True
     return False
 
diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py
index 85547433a8f..2bc7f122320 100644
--- a/dpnp/dpnp_iface_indexing.py
+++ b/dpnp/dpnp_iface_indexing.py
@@ -138,7 +138,7 @@ def choose(x1, choices, out=None, mode="raise"):
     )
 
     if x1_desc:
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
diff --git a/dpnp/dpnp_iface_libmath.py b/dpnp/dpnp_iface_libmath.py
index 9bb6328cb19..eaf6c5676a4 100644
--- a/dpnp/dpnp_iface_libmath.py
+++ b/dpnp/dpnp_iface_libmath.py
@@ -82,7 +82,7 @@ def erf(in_array1):
         in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
     )
     if x1_desc:
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py
index 25d8c7e01c8..cf3d14b98de 100644
--- a/dpnp/dpnp_iface_mathematical.py
+++ b/dpnp/dpnp_iface_mathematical.py
@@ -2946,7 +2946,7 @@ def modf(x1, **kwargs):
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
 
     if x1_desc:
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
diff --git a/dpnp/dpnp_iface_sorting.py b/dpnp/dpnp_iface_sorting.py
index 22a1f447da4..6700dc8ffb8 100644
--- a/dpnp/dpnp_iface_sorting.py
+++ b/dpnp/dpnp_iface_sorting.py
@@ -215,7 +215,7 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
 
     if x1_desc:
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
diff --git a/dpnp/linalg/dpnp_utils_linalg.py b/dpnp/linalg/dpnp_utils_linalg.py
index fda4af36f79..97ff26a7043 100644
--- a/dpnp/linalg/dpnp_utils_linalg.py
+++ b/dpnp/linalg/dpnp_utils_linalg.py
@@ -401,7 +401,7 @@ def _batched_qr(a, mode="reduced"):
         # w/a to avoid raice conditional on CUDA during multiple runs
         # TODO: Remove it ones the OneMath issue is resolved
         # https://github.com/uxlfoundation/oneMath/issues/626
-        if dpnp.is_cuda_backend(a_sycl_queue):
+        if dpnp.is_cuda_backend(a_sycl_queue):  # pragma: no cover
             ht_ev.wait()
         else:
             _manager.add_event_pair(ht_ev, geqrf_ev)
@@ -2479,7 +2479,7 @@ def dpnp_qr(a, mode="reduced"):
         # w/a to avoid raice conditional on CUDA during multiple runs
         # TODO: Remove it ones the OneMath issue is resolved
         # https://github.com/uxlfoundation/oneMath/issues/626
-        if dpnp.is_cuda_backend(a_sycl_queue):
+        if dpnp.is_cuda_backend(a_sycl_queue):  # pragma: no cover
             ht_ev.wait()
         else:
             _manager.add_event_pair(ht_ev, geqrf_ev)
diff --git a/dpnp/random/dpnp_iface_random.py b/dpnp/random/dpnp_iface_random.py
index 74c7d378b19..d20b4f64c2a 100644
--- a/dpnp/random/dpnp_iface_random.py
+++ b/dpnp/random/dpnp_iface_random.py
@@ -140,7 +140,7 @@ def beta(a, b, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -191,7 +191,7 @@ def binomial(n, p, size=None):
     """
 
     if not use_origin_backend(n):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -248,7 +248,7 @@ def chisquare(df, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -321,7 +321,7 @@ def exponential(scale=1.0, size=None):
     """
 
    if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -358,7 +358,7 @@ def f(dfnum, dfden, size=None):
     """
 
     if not use_origin_backend(dfnum):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -401,7 +401,7 @@ def gamma(shape, scale=1.0, size=None):
     """
 
     if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -444,7 +444,7 @@ def geometric(p, size=None):
     """
 
     if not use_origin_backend(p):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -483,7 +483,7 @@ def gumbel(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -526,7 +526,7 @@ def hypergeometric(ngood, nbad, nsample, size=None):
     """
 
     if not use_origin_backend(ngood):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -579,7 +579,7 @@ def laplace(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -618,7 +618,7 @@ def logistic(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -664,7 +664,7 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
     """
 
     if not use_origin_backend(mean):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -726,7 +726,7 @@ def multinomial(n, pvals, size=None):
         pvals_sum = sum(pvals)
         pvals_desc = dpnp.get_dpnp_descriptor(dpnp.array(pvals))
         d = len(pvals)
-        if dpnp.is_cuda_backend(pvals_desc.get_array()):
+        if dpnp.is_cuda_backend(pvals_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -780,7 +780,7 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
         cov_ = dpnp.get_dpnp_descriptor(dpnp.array(cov, dtype=dpnp.float64))
         if dpnp.is_cuda_backend(mean_.get_array()) or dpnp.is_cuda_backend(
             cov_.get_array()
-        ):
+        ):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -839,7 +839,7 @@ def negative_binomial(n, p, size=None):
     """
 
     if not use_origin_backend(n):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -929,7 +929,7 @@ def noncentral_chisquare(df, nonc, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -988,7 +988,7 @@ def pareto(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1062,7 +1062,7 @@ def poisson(lam=1.0, size=None):
     """
 
     if not use_origin_backend(lam):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1102,7 +1102,7 @@ def power(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1524,7 +1524,7 @@ def rayleigh(scale=1.0, size=None):
     """
 
     if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1606,7 +1606,7 @@ def shuffle(x1):
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False)
 
     if x1_desc:
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1655,7 +1655,7 @@ def seed(seed=None, device=None, sycl_queue=None):
     )
 
     if not use_origin_backend(seed):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1700,7 +1700,7 @@ def standard_cauchy(size=None):
     """
 
     if not use_origin_backend(size):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1729,7 +1729,7 @@ def standard_exponential(size=None):
     """
 
     if not use_origin_backend(size):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1761,7 +1761,7 @@ def standard_gamma(shape, size=None):
     """
 
     if not use_origin_backend(shape):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1844,7 +1844,7 @@ def standard_t(df, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1885,7 +1885,7 @@ def triangular(left, mode, right, size=None):
     """
 
     if not use_origin_backend(left):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1998,7 +1998,7 @@ def vonmises(mu, kappa, size=None):
     """
 
     if not use_origin_backend(mu):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2039,7 +2039,7 @@ def wald(mean, scale, size=None):
     """
 
     if not use_origin_backend(mean):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2080,7 +2080,7 @@ def weibull(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2117,7 +2117,7 @@ def zipf(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
diff --git a/dpnp/random/dpnp_random_state.py b/dpnp/random/dpnp_random_state.py
index 7cd6e05c81f..774095d518e 100644
--- a/dpnp/random/dpnp_random_state.py
+++ b/dpnp/random/dpnp_random_state.py
@@ -235,7 +235,7 @@ def normal(
         """
 
         if not use_origin_backend():
-            if dpnp.is_cuda_backend(self):
+            if dpnp.is_cuda_backend(self):  # pragma: no cover
                raise NotImplementedError(
                    "Running on CUDA is currently not supported"
                )
@@ -369,7 +369,7 @@ def randint(self, low, high=None, size=None, dtype=int, usm_type="device"):
        """
 
        if not use_origin_backend(low):
-            if dpnp.is_cuda_backend(self):
+            if dpnp.is_cuda_backend(self):  # pragma: no cover
                raise NotImplementedError(
                    "Running on CUDA is currently not supported"
                )
@@ -598,7 +598,7 @@ def uniform(
        """
 
        if not use_origin_backend():
-            if dpnp.is_cuda_backend(self):
+            if dpnp.is_cuda_backend(self):  # pragma: no cover
                raise NotImplementedError(
                    "Running on CUDA is currently not supported"
                )
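
Note on the recurring change: `# pragma: no cover` is the standard coverage.py exclusion pragma. Any line (or branch) carrying it is dropped from the coverage report, so these CUDA-only guards stop showing up as misses on CI machines that have no CUDA device. A minimal sketch of the effect, using an illustrative function that is not part of this patch:

    import coverage

    def guarded(x):
        if x < 0:  # pragma: no cover
            # never taken in this run; excluded from the report
            raise NotImplementedError("negative input is not supported")
        return x * 2

    cov = coverage.Coverage()
    cov.start()
    guarded(3)  # only the reachable path executes
    cov.stop()
    cov.report()  # the guarded branch is not counted as a miss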
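
The first hunk shows where the decision is actually made: `is_cuda_backend` resolves the SYCL device behind its argument and compares `sycl_device.backend` against `dpctl.backend_type.cuda`. A standalone sketch of the same check using plain dpctl, assuming a default SYCL device is available on the machine:

    import dpctl

    # pick whatever device dpctl selects by default for this process
    device = dpctl.select_default_device()

    # backend is a dpctl.backend_type enum member (cuda, opencl, level_zero, ...)
    if device.backend == dpctl.backend_type.cuda:
        print("default device runs on the CUDA backend")
    else:
        print(f"default device backend: {device.backend}")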
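
The two linalg hunks mark an existing workaround as uncoverable rather than changing behavior: on CUDA, the host-task event from geqrf is waited on synchronously to sidestep a race in oneMath (https://github.com/uxlfoundation/oneMath/issues/626), while every other backend keeps the asynchronous path. A schematic of that control flow, where `ht_ev`, `geqrf_ev`, and `_manager` come from the hunks and the helper itself is hypothetical:

    import dpnp

    def _finalize_geqrf(a_sycl_queue, _manager, ht_ev, geqrf_ev):
        # hypothetical helper mirroring the pattern in _batched_qr / dpnp_qr
        if dpnp.is_cuda_backend(a_sycl_queue):  # pragma: no cover
            # CUDA: block until the host task finishes to avoid the
            # oneMath race during repeated runs
            ht_ev.wait()
        else:
            # other backends: stay asynchronous; the manager keeps the
            # host-task/compute event pair alive until completion
            _manager.add_event_pair(ht_ev, geqrf_ev)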