
Handle dpnp functions and tests to run on CUDA devices #2075

Merged

51 commits from tests_cuda merged into master on Jan 17, 2025.

Commits (51)
b6bd08a
Testing is adapted for cuda devices
npolina4 Sep 20, 2024
5f1b083
Apply fallback to numpy for all unsupported functions on cuda device.
npolina4 Sep 24, 2024
fe8fe11
update tests
npolina4 Sep 25, 2024
0c64722
Applied review comments
npolina4 Sep 26, 2024
4ad814c
Merge branch 'master' into tests_cuda
npolina4 Oct 22, 2024
6e2c3c7
Update test_indexing.py
npolina4 Oct 25, 2024
4e3c87c
Update test_solve.py
npolina4 Oct 25, 2024
94a418e
Update test_histogram.py
npolina4 Oct 25, 2024
1461e81
Update test_histogram.py
npolina4 Oct 25, 2024
47f51e3
Merge branch 'master' into tests_cuda
npolina4 Oct 25, 2024
2fca5f7
Merge commit '7bfe0c8eec481452dcdd07d99eaf01373769ab5b' into tests_cuda
vlad-perevezentsev Nov 14, 2024
519008e
Update skipped_tests_cuda.tbl
vlad-perevezentsev Nov 14, 2024
a38949d
Apply fallback to numpy for TestRational and test_copy_multigpu
vlad-perevezentsev Nov 14, 2024
139c784
Address remarks
vlad-perevezentsev Nov 14, 2024
f7e3778
Merge master into tests_cuda
vlad-perevezentsev Nov 21, 2024
42f20fc
Merge master into tests_cuda
vlad-perevezentsev Nov 27, 2024
351e12d
Use dpctl.select_default_device() in is_cuda_backend() func
vlad-perevezentsev Nov 29, 2024
590dbc8
Raise NotImplementedError in unsupported functions on CUDA
vlad-perevezentsev Nov 29, 2024
cc48533
Implement is_cuda_device() func for tests in helper.py
vlad-perevezentsev Nov 29, 2024
645c6d9
Skipped tests for unsupported functions on CUDA
vlad-perevezentsev Nov 29, 2024
6f688fe
Update test_arithmetic.py
vlad-perevezentsev Nov 29, 2024
b217cc5
Handle TestSpacing to run on CUDA
vlad-perevezentsev Nov 29, 2024
5179c06
Update fft tests to run on CUDA
vlad-perevezentsev Nov 29, 2024
f1c5eaf
Update linalg tests to run on CUDA
vlad-perevezentsev Dec 2, 2024
0a79e66
Avoid using dpnp.random in cupy tests on CUDA
vlad-perevezentsev Dec 2, 2024
e3e9afe
Remove previously added fixtures for unsupported funcs on CUDA
vlad-perevezentsev Dec 2, 2024
f015b88
Merge master into tests_cuda
vlad-perevezentsev Dec 2, 2024
00b7f02
Merge master into tests_cuda
vlad-perevezentsev Dec 5, 2024
ff98aef
Apply remarks
vlad-perevezentsev Dec 5, 2024
20acd25
Merge master into tests_cuda
vlad-perevezentsev Dec 11, 2024
d3f8d12
Update skipped_tests_cuda.tbl
vlad-perevezentsev Dec 11, 2024
7408324
Handle new fft tests for CUDA
vlad-perevezentsev Dec 11, 2024
a7985b3
Skip TestCond::test_nan on CUDA
vlad-perevezentsev Dec 11, 2024
70c9b0a
Unskip linalg tests due to fix in gh-2212
vlad-perevezentsev Dec 11, 2024
556c1a3
Create non-singular matrix in test_usm_type.py::test_cond
vlad-perevezentsev Dec 11, 2024
2931a2b
Update jira ticket number for qr issue
vlad-perevezentsev Dec 11, 2024
73f2ca0
Fix fallback numpy logic in def modf
vlad-perevezentsev Dec 11, 2024
8da6027
Revert deleted fixtures for TestChoose
vlad-perevezentsev Dec 11, 2024
e87174e
Merge master into tests_cuda
vlad-perevezentsev Dec 11, 2024
82f3a17
Merge master into tests_cuda
vlad-perevezentsev Dec 18, 2024
c83c062
Merge master into tests_cuda
vlad-perevezentsev Dec 20, 2024
6769469
Add cuda to list_of_backend_str in test_sycl_queue.py
vlad-perevezentsev Dec 20, 2024
1a3142f
Merge master into tests_cuda
vlad-perevezentsev Dec 20, 2024
2f2235f
Merge master into tests_cuda
vlad-perevezentsev Jan 10, 2025
f0ee02a
Remove skip due to SAT-7588
vlad-perevezentsev Jan 10, 2025
2427599
Update TestFftn::test_fftn
vlad-perevezentsev Jan 10, 2025
26275a7
Pass self to is_cuda_backend
vlad-perevezentsev Jan 10, 2025
773b177
Merge master into tests_cuda
vlad-perevezentsev Jan 14, 2025
deb17ac
Apply remarks
vlad-perevezentsev Jan 14, 2025
79be7d3
Merge master into tests_cuda
vlad-perevezentsev Jan 14, 2025
58b81a8
Apply a minor comment
vlad-perevezentsev Jan 16, 2025
dpnp/dpnp_iface.py (36 additions, 0 deletions)

@@ -68,6 +68,7 @@
     "get_result_array",
     "get_usm_ndarray",
     "get_usm_ndarray_or_scalar",
+    "is_cuda_backend",
     "is_supported_array_or_scalar",
     "is_supported_array_type",
     "synchronize_array_data",
@@ -681,6 +682,41 @@ def get_usm_ndarray_or_scalar(a):
     return a if dpnp.isscalar(a) else get_usm_ndarray(a)
 
 
+def is_cuda_backend(obj=None):
+    """
+    Checks whether the object has a CUDA backend.
+
+    Parameters
+    ----------
+    obj : {Device, SyclDevice, SyclQueue, dpnp.ndarray, usm_ndarray, None},
+        optional
+        An input object with a ``sycl_device`` property used to check the
+        device backend. If `obj` is ``None``, the backend is checked for the
+        default queue.
+        Default: ``None``.
+
+    Returns
+    -------
+    out : bool
+        ``True`` if the data of the input object resides on a CUDA backend,
+        otherwise ``False``.
+
+    """
+
+    if obj is None:
+        sycl_device = dpctl.select_default_device()
+    elif isinstance(obj, dpctl.SyclDevice):
+        sycl_device = obj
+    else:
+        sycl_device = getattr(obj, "sycl_device", None)
+    if (
+        sycl_device is not None
+        and sycl_device.backend == dpctl.backend_type.cuda
+    ):
+        return True
+    return False
+
+
 def is_supported_array_or_scalar(a):
     """
     Return ``True`` if `a` is a scalar or an array of either
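
A minimal usage sketch of the new helper (assuming a working dpnp/dpctl installation; which backend is reported depends on the devices available at runtime):

    import dpnp

    # With no argument, the default SYCL device is inspected
    # (via dpctl.select_default_device(), as in the implementation above).
    on_cuda = dpnp.is_cuda_backend()

    # Any object exposing a sycl_device property can be checked, e.g. an array.
    x = dpnp.arange(10)
    if dpnp.is_cuda_backend(x):
        print("x resides on a CUDA device")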
dpnp/dpnp_iface_indexing.py (6 additions, 0 deletions)

@@ -128,6 +128,7 @@ def choose(x1, choices, out=None, mode="raise"):
     :obj:`dpnp.take_along_axis` : Preferable if choices is an array.
 
     """
+
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
 
     choices_list = []
@@ -137,6 +138,11 @@
         )
 
     if x1_desc:
+        if dpnp.is_cuda_backend(x1_desc.get_array()):
+            raise NotImplementedError(
+                "Running on CUDA is currently not supported"
+            )
+
         if any(not desc for desc in choices_list):
             pass
         elif out is not None:
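
The guard added to choose() (and repeated in the functions below) turns an unsupported CUDA call into an explicit error instead of a silent wrong result. A sketch of what a caller sees (array values are illustrative):

    import dpnp

    inds = dpnp.array([0, 1, 0])
    choices = [dpnp.array([1, 2, 3]), dpnp.array([10, 20, 30])]

    try:
        result = dpnp.choose(inds, choices)  # [1, 20, 3] on supported backends
    except NotImplementedError:
        # Raised only when the input data resides on a CUDA device.
        result = None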
dpnp/dpnp_iface_libmath.py (4 additions, 0 deletions)

@@ -82,6 +82,10 @@ def erf(in_array1):
         in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
     )
     if x1_desc:
+        if dpnp.is_cuda_backend(x1_desc.get_array()):
+            raise NotImplementedError(
+                "Running on CUDA is currently not supported"
+            )
         return dpnp_erf(x1_desc).get_pyobj()
 
     result = create_output_descriptor_py(
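
Several commits in this PR also add an is_cuda_device() helper to the tests' helper.py for skipping unsupported cases. A hypothetical sketch of that pattern (the actual helper's signature may differ):

    import pytest
    import dpnp

    def is_cuda_device(device=None):
        # Hypothetical test helper: reuse the new public check.
        return dpnp.is_cuda_backend(device)

    @pytest.mark.skipif(is_cuda_device(), reason="erf is not supported on CUDA")
    def test_erf_basic():
        x = dpnp.linspace(-2.0, 2.0, 5)
        assert dpnp.erf(x).shape == x.shape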
dpnp/dpnp_iface_mathematical.py (10 additions, 2 deletions)

@@ -2945,8 +2945,16 @@ def modf(x1, **kwargs):
     """
 
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
-    if x1_desc and not kwargs:
-        return dpnp_modf(x1_desc)
+    if x1_desc:
+        if dpnp.is_cuda_backend(x1_desc.get_array()):
+            raise NotImplementedError(
+                "Running on CUDA is currently not supported"
+            )
+
+        if kwargs:
+            pass
+        else:
+            return dpnp_modf(x1_desc)
 
     return call_origin(numpy.modf, x1, **kwargs)
 
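
For reference, a sketch of modf's supported path (values follow numpy.modf semantics; with extra kwargs, or without a usable descriptor, the call falls back to numpy via call_origin, as in the diff above):

    import dpnp

    x = dpnp.array([1.5, -2.25])
    frac, integral = dpnp.modf(x)
    # frac     -> [0.5, -0.25]
    # integral -> [1.0, -2.0]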
dpnp/dpnp_iface_sorting.py (5 additions, 0 deletions)

@@ -192,6 +192,11 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
 
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
     if x1_desc:
+        if dpnp.is_cuda_backend(x1_desc.get_array()):
+            raise NotImplementedError(
+                "Running on CUDA is currently not supported"
+            )
+
         if not isinstance(kth, int):
             pass
         elif x1_desc.ndim == 0: