add argmin on diopi torch impl & del diopi test hyperbolic_trigo_function's save_args param
DoorKickers committed Aug 14, 2024
1 parent 56b3150 commit 74b8872
Showing 4 changed files with 83 additions and 1 deletion.
41 changes: 40 additions & 1 deletion diopi_test/python/configs/diopi_configs.py
@@ -1196,7 +1196,6 @@
name=['sinh', 'cosh', 'asinh', 'acosh', 'atanh'],
interface=['torch'],
is_inplace=True,
saved_args=dict(output=0),
dtype=[np.float16, np.float32, np.float64],
tensor_para=dict(
gen_fn='Genfunc.randn',
@@ -5556,6 +5555,46 @@
),
),

'argmin': dict(
name=['argmin'],
interface=["torch"],
para=dict(
dim=[0, -1, 0, 1, None, -2, 2, 1],
keepdim=[True, False, True, False, False, True, True, False],
),
tensor_para=dict(
args=[
{
"ins": ['input'],
"shape": ((), (1,), (1024, 80), (2, 256, 256), (2, 1, 64, 64),
(12, 0), (2, 0, 9), (0, 9, 8, 7)),
"dtype": [np.float64, np.float16, np.float32, np.int32, np.int16,
np.int64, np.uint8, np.int8],
"gen_fn": 'Genfunc.randn',
},
],
),
),

'argmin_same_value': dict(
name=['argmin'],
interface=["torch"],
para=dict(
dim=[-1, 0, None, 1],
keepdim=[True, False, True, False],
),
tensor_para=dict(
args=[
{
"ins": ['input'],
"shape": ((1,), (1024, 80), (2, 256, 256), (2, 1, 64, 64)),
"dtype": [np.float32],
"gen_fn": 'Genfunc.zeros',
},
],
),
),

'adadelta': dict(
name=["adadelta"],
interface=["CustomizedTest"],
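For context, the behavior the new 'argmin' and 'argmin_same_value' cases exercise can be checked directly against torch; the snippet below is a standalone illustration (not part of the test suite) covering dim/keepdim handling, zero-sized shapes, and the all-equal-values case:

import torch

x = torch.randn(2, 256, 256)
assert torch.argmin(x, dim=1, keepdim=True).shape == (2, 1, 256)
assert torch.argmin(x, dim=1).shape == (2, 256)
assert torch.argmin(x).shape == ()  # dim=None reduces over the flattened input

# Zero-sized shapes such as (12, 0) are legal as long as the reduced dim itself is non-empty.
assert torch.argmin(torch.randn(12, 0), dim=-2, keepdim=True).shape == (1, 0)

# The 'argmin_same_value' cases feed all-zero tensors: with ties, torch documents
# returning the index of the first minimal value.
z = torch.zeros(1024, 80)
assert torch.argmin(z, dim=-1, keepdim=True).eq(0).all()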
23 changes: 23 additions & 0 deletions diopi_test/python/conformance/diopi_functions.py
@@ -3563,6 +3563,29 @@ def argmax(input, dim=None, keepdim=False):

return out

def argmin(input, dim=None, keepdim=False):
sizeO = list(input.size().data)
if len(sizeO) > 0 and dim is not None:
        assert -len(sizeO) <= dim < len(sizeO), "dim out of range"
        # Normalize negative dims so the slicing below removes the intended axis.
        if dim < 0:
            dim += len(sizeO)
if keepdim:
sizeO[dim] = 1
else:
sizeO = sizeO[:dim] + sizeO[dim + 1 :]
else:
sizeO = [1]

out = Tensor(sizeO, from_numpy_dtype(glob_vars.int_type))
func = check_function("diopiArgmin")
    # TODO: check why keepdim is passed here even when dim is None.
ret = (
func(input.context(), out, input, keepdim)
if dim is None
else func(input.context(), out, input, dim, keepdim)
)
check_returncode(ret)

return out


def smooth_l1_loss(input, target, reduction="mean", beta=1.0):
assert (
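The output-shape rule in the new argmin wrapper can be summarized as a small standalone helper (argmin_out_shape is a hypothetical name used only for illustration, not part of diopi_functions.py):

def argmin_out_shape(shape, dim=None, keepdim=False):
    """Expected argmin output shape for a given input shape, mirroring the wrapper above."""
    if len(shape) == 0 or dim is None:
        return [1]  # scalar input or full reduction yields a single index
    dim = dim % len(shape)  # accept negative dims
    if keepdim:
        return [1 if i == dim else s for i, s in enumerate(shape)]
    return [s for i, s in enumerate(shape) if i != dim]

assert argmin_out_shape((2, 256, 256), dim=1, keepdim=True) == [2, 1, 256]
assert argmin_out_shape((2, 256, 256), dim=1, keepdim=False) == [2, 256]
assert argmin_out_shape((12, 0), dim=-2, keepdim=True) == [1, 0]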
10 changes: 10 additions & 0 deletions impl/torch/functions/functions.cpp
@@ -3200,6 +3200,16 @@ diopiError_t diopiArgmax(diopiContextHandle_t ctx, diopiTensorHandle_t out, diop
return diopiSuccess;
}

diopiError_t diopiArgmin(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim) {
impl::aten::setCurStream(ctx);
auto atOut = impl::aten::buildATen(out);
auto atInput = impl::aten::buildATen(input);
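    // A null dim selects the flattened argmin; it is forwarded to ATen as c10::nullopt.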
c10::optional<int64_t> atDim = dim ? c10::optional<int64_t>(*dim) : c10::nullopt;
CALL_ATEN_CUDA_FUNC(argmin_out, atOut, atInput, atDim, keepdim);

return diopiSuccess;
}

diopiError_t diopiSmoothL1Loss(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, diopiConstTensorHandle_t target,
diopiReduction_t reduction, double beta) {
impl::aten::setCurStream(ctx);
10 changes: 10 additions & 0 deletions proto/include/diopi/functions.h
@@ -2713,6 +2713,16 @@ DIOPI_API diopiError_t diopiCdistBackward(diopiContextHandle_t ctx, diopiTensorH
*/
DIOPI_API diopiError_t diopiArgmax(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim);

/**
* @brief Returns the indices of the minimum values of a tensor across a dimension.
* @param[in] ctx Context environment.
* @param[in] input the input tensor. type=[float32, float64, float16, int16, int32, int64, uint8, int8, bool].
 * @param[in] dim the dimension to reduce over; pass NULL to compute the argmin of the flattened input. type=[int32, int64].
* @param[in] keepdim whether the output tensor has dim retained or not.
* @param[out] out the output tensor. type=[int32, int64].
*/
DIOPI_API diopiError_t diopiArgmin(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim);

/**
* @brief The function is used to implement the Adadelta optimizer. Its functionality is to perform a single parameter update.
* @param[in] ctx Context environment.
