Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Handle dpnp functions and tests to run on CUDA devices #2075

Merged
merged 51 commits into from
Jan 17, 2025
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
b6bd08a
Testing is adapted for cuda devices
npolina4 Sep 20, 2024
5f1b083
Apply fallback to numpy for all unsupported functions on cuda device.
npolina4 Sep 24, 2024
fe8fe11
update tests
npolina4 Sep 25, 2024
0c64722
Applied review comments
npolina4 Sep 26, 2024
4ad814c
Merge branch 'master' into tests_cuda
npolina4 Oct 22, 2024
6e2c3c7
Update test_indexing.py
npolina4 Oct 25, 2024
4e3c87c
Update test_solve.py
npolina4 Oct 25, 2024
94a418e
Update test_histogram.py
npolina4 Oct 25, 2024
1461e81
Update test_histogram.py
npolina4 Oct 25, 2024
47f51e3
Merge branch 'master' into tests_cuda
npolina4 Oct 25, 2024
2fca5f7
Merge commit '7bfe0c8eec481452dcdd07d99eaf01373769ab5b' into tests_cuda
vlad-perevezentsev Nov 14, 2024
519008e
Update skipped_tests_cuda.tbl
vlad-perevezentsev Nov 14, 2024
a38949d
Apply fallback to numpy for TestRational and test_copy_multigpu
vlad-perevezentsev Nov 14, 2024
139c784
Address remarks
vlad-perevezentsev Nov 14, 2024
f7e3778
Merge master into tests_cuda
vlad-perevezentsev Nov 21, 2024
42f20fc
Merge master into tests_cuda
vlad-perevezentsev Nov 27, 2024
351e12d
Use dpctl.select_default_device() in is_cuda_backend() func
vlad-perevezentsev Nov 29, 2024
590dbc8
Raise NotImplementedError in unsupported functions on CUDA
vlad-perevezentsev Nov 29, 2024
cc48533
Implement is_cuda_device() func for tests in helper.py
vlad-perevezentsev Nov 29, 2024
645c6d9
Skipped tests for unsupported functions on CUDA
vlad-perevezentsev Nov 29, 2024
6f688fe
Update test_arithmetic.py
vlad-perevezentsev Nov 29, 2024
b217cc5
Handle TestSpacing to run on CUDA
vlad-perevezentsev Nov 29, 2024
5179c06
Update fft tests to run on CUDA
vlad-perevezentsev Nov 29, 2024
f1c5eaf
Update linalg tests to run on CUDA
vlad-perevezentsev Dec 2, 2024
0a79e66
Avoid using dpnp.random in cupy tests on CUDA
vlad-perevezentsev Dec 2, 2024
e3e9afe
Remove previously added fixtures for unsupported funcs on CUDA
vlad-perevezentsev Dec 2, 2024
f015b88
Merge master into tests_cuda
vlad-perevezentsev Dec 2, 2024
00b7f02
Merge master into tests_cuda
vlad-perevezentsev Dec 5, 2024
ff98aef
Apply remarks
vlad-perevezentsev Dec 5, 2024
20acd25
Merge master into tests_cuda
vlad-perevezentsev Dec 11, 2024
d3f8d12
Update skipped_tests_cuda.tbl
vlad-perevezentsev Dec 11, 2024
7408324
Handle new fft tests for CUDA
vlad-perevezentsev Dec 11, 2024
a7985b3
Skip TestCond::test_nan on CUDA
vlad-perevezentsev Dec 11, 2024
70c9b0a
Unskip linalg tests due to fix in gh-2212
vlad-perevezentsev Dec 11, 2024
556c1a3
Create non-singular matrix in test_usm_type.py::test_cond
vlad-perevezentsev Dec 11, 2024
2931a2b
Update jira ticket number for qr issue
vlad-perevezentsev Dec 11, 2024
73f2ca0
Fix fallback numpy logic in def modf
vlad-perevezentsev Dec 11, 2024
8da6027
Revert deleted fixtures for TestChoose
vlad-perevezentsev Dec 11, 2024
e87174e
Merge master into tests_cuda
vlad-perevezentsev Dec 11, 2024
82f3a17
Merge master into tests_cuda
vlad-perevezentsev Dec 18, 2024
c83c062
Merge master into tests_cuda
vlad-perevezentsev Dec 20, 2024
6769469
Add cuda to list_of_backend_str in test_sycl_queue.py
vlad-perevezentsev Dec 20, 2024
1a3142f
Merge master into tests_cuda
vlad-perevezentsev Dec 20, 2024
2f2235f
Merge master into tests_cuda
vlad-perevezentsev Jan 10, 2025
f0ee02a
Remove skip due to SAT-7588
vlad-perevezentsev Jan 10, 2025
2427599
Update TestFftn::test_fftn
vlad-perevezentsev Jan 10, 2025
26275a7
Pass self to is_cuda_backend
vlad-perevezentsev Jan 10, 2025
773b177
Merge master into tests_cuda
vlad-perevezentsev Jan 14, 2025
deb17ac
Apply remarks
vlad-perevezentsev Jan 14, 2025
79be7d3
Merge master into tests_cuda
vlad-perevezentsev Jan 14, 2025
58b81a8
Apply a minor comment
vlad-perevezentsev Jan 16, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions dpnp/dpnp_iface.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@
"get_normalized_queue_device",
"get_result_array",
"get_usm_ndarray",
"is_cuda_backend",
vlad-perevezentsev marked this conversation as resolved.
Show resolved Hide resolved
"get_usm_ndarray_or_scalar",
"is_supported_array_or_scalar",
"is_supported_array_type",
Expand Down Expand Up @@ -757,6 +758,37 @@ def get_usm_ndarray_or_scalar(a):
return a if dpnp.isscalar(a) else get_usm_ndarray(a)


def is_cuda_backend(obj=None):
    """
    Checks that object has a cuda backend.

    Parameters
    ----------
    obj : {Device, SyclDevice, SyclQueue, dpnp.ndarray, usm_ndarray, None},
        optional
        An input object with sycl_device property to check device backend.
        If obj is ``None``, device backend will be checked for the default
        queue.
        Default: ``None``.

    Returns
    -------
    out : bool
        Return ``True`` if object has a cuda backend, otherwise ``False``.

    """

    if obj is None:
        # No object supplied: inspect the device of the default SYCL queue
        sycl_device = dpctl.SyclQueue().sycl_device
    elif isinstance(obj, dpctl.SyclDevice):
        sycl_device = obj
    else:
        # Device, SyclQueue, dpnp.ndarray and usm_ndarray all expose a
        # sycl_device attribute; anything else yields None (-> False)
        sycl_device = getattr(obj, "sycl_device", None)
    return sycl_device is not None and "cuda" in sycl_device.backend.name


def is_supported_array_or_scalar(a):
"""
Return ``True`` if `a` is a scalar or an array of either
Expand Down
3 changes: 3 additions & 0 deletions dpnp/dpnp_iface_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,7 @@ def choose(x1, choices, out=None, mode="raise"):
:obj:`dpnp.take_along_axis` : Preferable if choices is an array.

"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)

choices_list = []
Expand All @@ -192,6 +193,8 @@ def choose(x1, choices, out=None, mode="raise"):
pass
elif not choices_list:
pass
elif dpnp.is_cuda_backend(x1):
pass
vlad-perevezentsev marked this conversation as resolved.
Show resolved Hide resolved
else:
size = x1_desc.size
choices_size = choices_list[0].size
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_libmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ def erf(in_array1):
x1_desc = dpnp.get_dpnp_descriptor(
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if x1_desc and dpnp.is_cuda_backend(in_array1):
return dpnp_erf(x1_desc).get_pyobj()

result = create_output_descriptor_py(
Expand Down
9 changes: 7 additions & 2 deletions dpnp/dpnp_iface_mathematical.py
Original file line number Diff line number Diff line change
Expand Up @@ -2478,8 +2478,13 @@ def modf(x1, **kwargs):
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc and not kwargs:
return dpnp_modf(x1_desc)
if x1_desc:
if not kwargs:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
return dpnp_modf(x1_desc)

return call_origin(numpy.modf, x1, **kwargs)

Expand Down
2 changes: 2 additions & 0 deletions dpnp/dpnp_iface_sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,8 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
pass
elif order is not None:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
return dpnp_partition(x1_desc, kth, axis, kind, order).get_pyobj()

Expand Down
4 changes: 4 additions & 0 deletions dpnp/dpnp_iface_statistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -380,6 +380,8 @@ def correlate(x1, x2, mode="valid"):
pass
elif mode != "valid":
pass
elif dpnp.is_cuda_backend(x1) or dpnp.is_cuda_backend(x2):
pass
else:
return dpnp_correlate(x1_desc, x2_desc).get_pyobj()

Expand Down Expand Up @@ -665,6 +667,8 @@ def median(x1, axis=None, out=None, overwrite_input=False, keepdims=False):
pass
elif keepdims:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
result_obj = dpnp_median(x1_desc).get_pyobj()
result = dpnp.convert_single_elem_array_to_scalar(result_obj)
Expand Down
68 changes: 66 additions & 2 deletions dpnp/random/dpnp_iface_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,8 @@ def beta(a, b, size=None):
pass
elif b <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_beta(a, b, size).get_pyobj()

Expand Down Expand Up @@ -196,6 +198,8 @@ def binomial(n, p, size=None):
pass
elif n < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_binomial(int(n), p, size).get_pyobj()

Expand Down Expand Up @@ -244,6 +248,8 @@ def chisquare(df, size=None):
pass
elif df <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
# TODO:
# float to int, safe
Expand Down Expand Up @@ -312,6 +318,8 @@ def exponential(scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_exponential(scale, size).get_pyobj()

Expand Down Expand Up @@ -348,6 +356,8 @@ def f(dfnum, dfden, size=None):
pass
elif dfden <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_f(dfnum, dfden, size).get_pyobj()

Expand Down Expand Up @@ -386,6 +396,8 @@ def gamma(shape, scale=1.0, size=None):
pass
elif shape < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_gamma(shape, scale, size).get_pyobj()

Expand Down Expand Up @@ -420,6 +432,8 @@ def geometric(p, size=None):
pass
elif p > 1 or p <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_geometric(p, size).get_pyobj()

Expand Down Expand Up @@ -456,6 +470,8 @@ def gumbel(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_gumbel(loc, scale, size).get_pyobj()

Expand Down Expand Up @@ -504,6 +520,8 @@ def hypergeometric(ngood, nbad, nsample, size=None):
pass
elif nsample < 1:
pass
elif dpnp.is_cuda_backend():
pass
else:
_m = int(ngood)
_l = int(ngood) + int(nbad)
Expand Down Expand Up @@ -542,6 +560,8 @@ def laplace(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_laplace(loc, scale, size).get_pyobj()

Expand Down Expand Up @@ -576,6 +596,8 @@ def logistic(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
result = dpnp_rng_logistic(loc, scale, size).get_pyobj()
if size is None or size == 1:
Expand Down Expand Up @@ -617,6 +639,8 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
pass
elif sigma < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_lognormal(mean, sigma, size).get_pyobj()

Expand Down Expand Up @@ -674,6 +698,8 @@ def multinomial(n, pvals, size=None):
pass
elif pvals_sum < 0.0:
pass
elif dpnp.is_cuda_backend():
pass
else:
if size is None:
shape = (d,)
Expand Down Expand Up @@ -725,6 +751,8 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
pass
elif mean_.shape[0] != cov_.shape[0]:
pass
elif dpnp.is_cuda_backend():
pass
else:
final_shape = list(shape[:])
final_shape.append(mean_.shape[0])
Expand Down Expand Up @@ -777,6 +805,8 @@ def negative_binomial(n, p, size=None):
pass
elif n <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_negative_binomial(n, p, size).get_pyobj()

Expand Down Expand Up @@ -862,6 +892,8 @@ def noncentral_chisquare(df, nonc, size=None):
pass
elif nonc < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_noncentral_chisquare(df, nonc, size).get_pyobj()

Expand Down Expand Up @@ -912,6 +944,8 @@ def pareto(a, size=None):
pass
elif a <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_pareto(a, size).get_pyobj()

Expand Down Expand Up @@ -981,6 +1015,8 @@ def poisson(lam=1.0, size=None):
pass
elif lam < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_poisson(lam, size).get_pyobj()

Expand Down Expand Up @@ -1016,6 +1052,8 @@ def power(a, size=None):
pass
elif a <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_power(a, size).get_pyobj()

Expand Down Expand Up @@ -1423,6 +1461,8 @@ def rayleigh(scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_rayleigh(scale, size).get_pyobj()

Expand Down Expand Up @@ -1495,6 +1535,8 @@ def shuffle(x1):
if x1_desc:
if not dpnp.is_type_supported(x1_desc.dtype):
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
dpnp_rng_shuffle(x1_desc).get_pyobj()
return
Expand Down Expand Up @@ -1545,6 +1587,8 @@ def seed(seed=None, device=None, sycl_queue=None):
pass
elif seed < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
# TODO:
# migrate to a single approach with RandomState class
Expand Down Expand Up @@ -1577,7 +1621,10 @@ def standard_cauchy(size=None):
"""

if not use_origin_backend(size):
return dpnp_rng_standard_cauchy(size).get_pyobj()
if dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_cauchy(size).get_pyobj()

return call_origin(numpy.random.standard_cauchy, size)

Expand All @@ -1602,7 +1649,10 @@ def standard_exponential(size=None):
"""

if not use_origin_backend(size):
return dpnp_rng_standard_exponential(size).get_pyobj()
if dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_exponential(size).get_pyobj()

return call_origin(numpy.random.standard_exponential, size)

Expand Down Expand Up @@ -1636,6 +1686,8 @@ def standard_gamma(shape, size=None):
pass
elif shape < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_gamma(shape, size).get_pyobj()

Expand Down Expand Up @@ -1714,6 +1766,8 @@ def standard_t(df, size=None):
pass
elif df <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_t(df, size).get_pyobj()

Expand Down Expand Up @@ -1758,6 +1812,8 @@ def triangular(left, mode, right, size=None):
pass
elif left == right:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_triangular(left, mode, right, size).get_pyobj()

Expand Down Expand Up @@ -1862,6 +1918,8 @@ def vonmises(mu, kappa, size=None):
return dpnp.nan
elif kappa < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_vonmises(mu, kappa, size).get_pyobj()

Expand Down Expand Up @@ -1898,6 +1956,8 @@ def wald(mean, scale, size=None):
pass
elif scale <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_wald(mean, scale, size).get_pyobj()

Expand Down Expand Up @@ -1930,6 +1990,8 @@ def weibull(a, size=None):
pass
elif a < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_weibull(a, size).get_pyobj()

Expand Down Expand Up @@ -1962,6 +2024,8 @@ def zipf(a, size=None):
pass
elif a <= 1:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_zipf(a, size).get_pyobj()

Expand Down
Loading
Loading