Skip to content

Commit

Permalink
Handle dpnp functions and tests to run on CUDA devices (#2075)
Browse files Browse the repository at this point in the history
This PR suggests updating some function implementations and tests to run
on CUDA devices
The PR includes:
1. Raise NotImplementedError for unsupported functions
2. Create **skipped_tests_cuda.tbl** for unsupported functions
3. Skipped tests with a bug ticket for functions that should run on CUDA
4. Implement **is_cuda_backend()** and **is_cuda_device()** to determine
whether an object has a CUDA backend and whether the tests are running on CUDA
---------

Co-authored-by: Vladislav Perevezentsev <[email protected]>
  • Loading branch information
npolina4 and vlad-perevezentsev authored Jan 17, 2025
1 parent 498e705 commit 952a798
Show file tree
Hide file tree
Showing 26 changed files with 1,216 additions and 44 deletions.
36 changes: 36 additions & 0 deletions dpnp/dpnp_iface.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@
"get_result_array",
"get_usm_ndarray",
"get_usm_ndarray_or_scalar",
"is_cuda_backend",
"is_supported_array_or_scalar",
"is_supported_array_type",
"synchronize_array_data",
Expand Down Expand Up @@ -681,6 +682,41 @@ def get_usm_ndarray_or_scalar(a):
return a if dpnp.isscalar(a) else get_usm_ndarray(a)


def is_cuda_backend(obj=None):
    """
    Check whether an object is associated with a CUDA backend.

    Parameters
    ----------
    obj : {Device, SyclDevice, SyclQueue, dpnp.ndarray, usm_ndarray, None},
        optional
        An input object exposing a ``sycl_device`` property whose backend is
        inspected. If `obj` is ``None``, the backend of the default-selected
        device is inspected instead.

        Default: ``None``.

    Returns
    -------
    out : bool
        ``True`` if the input object resides on a CUDA backend,
        ``False`` otherwise.

    """

    # Resolve the SYCL device to inspect: a device passed directly,
    # the default device when nothing is given, or the device attached
    # to an array/queue-like object (None if the attribute is absent).
    if isinstance(obj, dpctl.SyclDevice):
        device = obj
    elif obj is None:
        device = dpctl.select_default_device()
    else:
        device = getattr(obj, "sycl_device", None)

    return device is not None and device.backend == dpctl.backend_type.cuda


def is_supported_array_or_scalar(a):
"""
Return ``True`` if `a` is a scalar or an array of either
Expand Down
6 changes: 6 additions & 0 deletions dpnp/dpnp_iface_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,7 @@ def choose(x1, choices, out=None, mode="raise"):
:obj:`dpnp.take_along_axis` : Preferable if choices is an array.
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)

choices_list = []
Expand All @@ -137,6 +138,11 @@ def choose(x1, choices, out=None, mode="raise"):
)

if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if any(not desc for desc in choices_list):
pass
elif out is not None:
Expand Down
4 changes: 4 additions & 0 deletions dpnp/dpnp_iface_libmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,10 @@ def erf(in_array1):
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
return dpnp_erf(x1_desc).get_pyobj()

result = create_output_descriptor_py(
Expand Down
12 changes: 10 additions & 2 deletions dpnp/dpnp_iface_mathematical.py
Original file line number Diff line number Diff line change
Expand Up @@ -2945,8 +2945,16 @@ def modf(x1, **kwargs):
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc and not kwargs:
return dpnp_modf(x1_desc)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if kwargs:
pass
else:
return dpnp_modf(x1_desc)

return call_origin(numpy.modf, x1, **kwargs)

Expand Down
5 changes: 5 additions & 0 deletions dpnp/dpnp_iface_sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,11 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if not isinstance(kth, int):
pass
elif x1_desc.ndim == 0:
Expand Down
Loading

0 comments on commit 952a798

Please sign in to comment.