Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix qml.math.get_interface for scipy input #7015

Merged
merged 18 commits into from
Feb 28, 2025
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions doc/releases/changelog-dev.md
Original file line number Diff line number Diff line change
Expand Up @@ -419,6 +419,10 @@

<h3>Bug fixes 🐛</h3>

* `qml.math.get_interface` now correctly extracts the `"scipy"` interface if provided a list/array
of sparse matrices.
[(#7015)](https://github.com/PennyLaneAI/pennylane/pull/7015)

* `qml.capture.PlxprInterpreter` now flattens pytree arguments before evaluation.
[(#6975)](https://github.com/PennyLaneAI/pennylane/pull/6975)

Expand Down
15 changes: 5 additions & 10 deletions pennylane/math/interface_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ def __hash__(self):
"""list[Interface]: allowed interface names"""


# pylint: disable=too-many-return-statements
def get_interface(*values):
"""Determines the correct framework to dispatch to given a tensor-like object or a
sequence of tensor-like objects.
Expand Down Expand Up @@ -139,17 +140,11 @@ def get_interface(*values):
UserWarning,
)

if "tensorflow" in interfaces:
return "tensorflow"
priority_interfaces = {"tensorflow", "torch", "jax", "autograd", "scipy"}
matching_interface = priority_interfaces.intersection(interfaces)

if "torch" in interfaces:
return "torch"

if "jax" in interfaces:
return "jax"

if "autograd" in interfaces:
return "autograd"
if matching_interface:
return matching_interface.pop()

return "numpy"

Expand Down
14 changes: 6 additions & 8 deletions pennylane/math/multi_dispatch.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
from collections.abc import Sequence

# pylint: disable=wrong-import-order
import autoray as ar
import numpy as onp
from autograd.numpy.numpy_boxes import ArrayBox
from autoray import numpy as np
Expand Down Expand Up @@ -165,12 +164,11 @@ def kron(*args, like=None, **kwargs):

if like == "torch":
mats = [
ar.numpy.asarray(arg, like="torch") if isinstance(arg, onp.ndarray) else arg
for arg in args
np.asarray(arg, like="torch") if isinstance(arg, onp.ndarray) else arg for arg in args
]
return ar.numpy.kron(*mats)
return np.kron(*mats)

return ar.numpy.kron(*args, like=like, **kwargs)
return np.kron(*args, like=like, **kwargs)


@multi_dispatch(argnum=[0], tensor_list=[0])
Expand Down Expand Up @@ -309,11 +307,11 @@ def matmul(tensor1, tensor2, like=None):
"""Returns the matrix product of two tensors."""
if like == "torch":
if get_interface(tensor1) != "torch":
tensor1 = ar.numpy.asarray(tensor1, like="torch")
tensor1 = np.asarray(tensor1, like="torch")
if get_interface(tensor2) != "torch":
tensor2 = ar.numpy.asarray(tensor2, like="torch")
tensor2 = np.asarray(tensor2, like="torch")
tensor2 = cast_like(tensor2, tensor1) # pylint: disable=arguments-out-of-order
return ar.numpy.matmul(tensor1, tensor2, like=like)
return np.matmul(tensor1, tensor2, like=like)


@multi_dispatch(argnum=[0, 1])
Expand Down
8 changes: 7 additions & 1 deletion pennylane/math/single_dispatch.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,17 +55,23 @@ def _builtins_shape(x):
return ar.shape(x)


def _builtins_coerce(x, like=None):
    """Coerce a builtin container ``x`` into the array type of the ``like`` backend.

    Registered below as the ``coerce`` implementation for the ``"builtins"``
    autoray backend, so plain lists/tuples dispatch like array inputs.
    """
    coerced = ar.numpy.asarray(x, like=like)
    return coerced


ar.register_function("builtins", "ndim", _builtins_ndim)
ar.register_function("builtins", "shape", _builtins_shape)
ar.register_function("builtins", "coerce", _builtins_coerce)
ar.register_function("builtins", "logical_mod", lambda x, y: x % y)
ar.register_function("builtins", "logical_xor", lambda x, y: x ^ y)

# -------------------------------- SciPy --------------------------------- #
# the following is required to ensure that SciPy sparse Hamiltonians passed to
# qml.SparseHamiltonian are not automatically 'unwrapped' to dense NumPy arrays.
ar.register_function("scipy", "to_numpy", lambda x: x)

ar.register_function("scipy", "coerce", ar.numpy.coerce)
ar.register_function("scipy", "shape", np.shape)
ar.register_function("scipy", "dot", np.dot)
ar.register_function("scipy", "conj", np.conj)
ar.register_function("scipy", "transpose", np.transpose)
ar.register_function("scipy", "ndim", np.ndim)
Expand Down
10 changes: 9 additions & 1 deletion tests/math/test_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -1017,12 +1017,20 @@ def test_expand_dims_tf(self, shape, axis, new_shape):
@pytest.mark.parametrize("t,interface", interface_test_data)
def test_get_interface(t, interface):
    """Test that the interface of a tensor-like object is correctly returned."""
    res = fn.get_interface(t)
    assert res == interface


def test_get_interface_scipy():
    """Test that ``get_interface`` reports ``"scipy"`` for sparse-matrix input,
    whether passed alone, as multiple arguments, or unpacked from a list."""
    sparse_mat = sci.sparse.csr_matrix([[0, 1], [1, 0]])

    assert fn.get_interface(sparse_mat) == "scipy"
    assert fn.get_interface(sparse_mat, sparse_mat) == "scipy"
    assert fn.get_interface(*[sparse_mat, sparse_mat]) == "scipy"


# pylint: disable=too-few-public-methods
class TestInterfaceEnum:
"""Test the Interface enum class"""
Expand Down
Loading