diff --git a/diopi_test/python/configs/diopi_configs.py b/diopi_test/python/configs/diopi_configs.py
index 644cd9bf9..dff448be0 100755
--- a/diopi_test/python/configs/diopi_configs.py
+++ b/diopi_test/python/configs/diopi_configs.py
@@ -1196,7 +1196,6 @@
         name=['sinh', 'cosh', 'asinh', 'acosh', 'atanh'],
         interface=['torch'],
         is_inplace=True,
-        saved_args=dict(output=0),
         dtype=[np.float16, np.float32, np.float64],
         tensor_para=dict(
             gen_fn='Genfunc.randn',
@@ -5556,6 +5555,46 @@
         ),
     ),
 
+    'argmin': dict(
+        name=['argmin'],
+        interface=["torch"],
+        para=dict(
+            dim=[0, -1, 0, 1, None, -2, 2, 1],
+            keepdim=[True, False, True, False, False, True, True, False],
+        ),
+        tensor_para=dict(
+            args=[
+                {
+                    "ins": ['input'],
+                    "shape": ((), (1,), (1024, 80), (2, 256, 256), (2, 1, 64, 64),
+                              (12, 0), (2, 0, 9), (0, 9, 8, 7)),
+                    "dtype": [np.float64, np.float16, np.float32, np.int32, np.int16,
+                              np.int64, np.uint8, np.int8],
+                    "gen_fn": 'Genfunc.randn',
+                },
+            ],
+        ),
+    ),
+
+    'argmin_same_value': dict(
+        name=['argmin'],
+        interface=["torch"],
+        para=dict(
+            dim=[-1, 0, None, 1],
+            keepdim=[True, False, True, False],
+        ),
+        tensor_para=dict(
+            args=[
+                {
+                    "ins": ['input'],
+                    "shape": ((1,), (1024, 80), (2, 256, 256), (2, 1, 64, 64)),
+                    "dtype": [np.float32],
+                    "gen_fn": 'Genfunc.zeros',
+                },
+            ],
+        ),
+    ),
+
     'adadelta': dict(
         name=["adadelta"],
         interface=["CustomizedTest"],
diff --git a/diopi_test/python/conformance/diopi_functions.py b/diopi_test/python/conformance/diopi_functions.py
index 065f0718c..aef6c4076 100644
--- a/diopi_test/python/conformance/diopi_functions.py
+++ b/diopi_test/python/conformance/diopi_functions.py
@@ -3563,6 +3563,29 @@ def argmax(input, dim=None, keepdim=False):
     return out
 
 
+def argmin(input, dim=None, keepdim=False):
+    sizeO = list(input.size().data)
+    if len(sizeO) > 0 and dim is not None:
+        assert dim < len(sizeO), "dim out of index"
+        if keepdim:
+            sizeO[dim] = 1
+        else:
+            sizeO = sizeO[:dim] + sizeO[dim + 1 :]
+    else:
+        sizeO = [1]
+
+    out = Tensor(sizeO, from_numpy_dtype(glob_vars.int_type))
+    func = check_function("diopiArgmin")
+    # todo: check the reason of using keepdim
+    ret = (
+        func(input.context(), out, input, keepdim)
+        if dim is None
+        else func(input.context(), out, input, dim, keepdim)
+    )
+    check_returncode(ret)
+
+    return out
+
 
 def smooth_l1_loss(input, target, reduction="mean", beta=1.0):
     assert (
diff --git a/impl/torch/functions/functions.cpp b/impl/torch/functions/functions.cpp
index 6d15cd29b..7710fd6d4 100644
--- a/impl/torch/functions/functions.cpp
+++ b/impl/torch/functions/functions.cpp
@@ -3200,6 +3200,16 @@ diopiError_t diopiArgmax(diopiContextHandle_t ctx, diopiTensorHandle_t out, diop
     return diopiSuccess;
 }
 
+diopiError_t diopiArgmin(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim) {
+    impl::aten::setCurStream(ctx);
+    auto atOut = impl::aten::buildATen(out);
+    auto atInput = impl::aten::buildATen(input);
+    c10::optional<int64_t> atDim = dim ? c10::optional<int64_t>(*dim) : c10::nullopt;
+    CALL_ATEN_CUDA_FUNC(argmin_out, atOut, atInput, atDim, keepdim);
+
+    return diopiSuccess;
+}
+
 diopiError_t diopiSmoothL1Loss(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, diopiConstTensorHandle_t target,
                                diopiReduction_t reduction, double beta) {
     impl::aten::setCurStream(ctx);
diff --git a/proto/include/diopi/functions.h b/proto/include/diopi/functions.h
index a0dd3ebdf..9a774dfd8 100644
--- a/proto/include/diopi/functions.h
+++ b/proto/include/diopi/functions.h
@@ -2713,6 +2713,16 @@ DIOPI_API diopiError_t diopiCdistBackward(diopiContextHandle_t ctx, diopiTensorH
  */
 DIOPI_API diopiError_t diopiArgmax(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim);
 
+/**
+ * @brief Returns the indices of the minimum values of a tensor across a dimension.
+ * @param[in] ctx Context environment.
+ * @param[in] input the input tensor. type=[float32, float64, float16, int16, int32, int64, uint8, int8, bool].
+ * @param[in] dim the dimension to do the operation over. type=[int32, int64].
+ * @param[in] keepdim whether the output tensor has dim retained or not.
+ * @param[out] out the output tensor. type=[int32, int64].
+ */
+DIOPI_API diopiError_t diopiArgmin(diopiContextHandle_t ctx, diopiTensorHandle_t out, diopiConstTensorHandle_t input, const int64_t* dim, bool keepdim);
+
 /**
  * @brief The function is used to implement the Adadelta optimizer. Its functionality is to perform a single parameter update.
  * @param[in] ctx Context environment.
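
Note (not part of the patch): a minimal sketch, assuming torch.argmin as the reference semantics, of the output-shape and dtype behaviour that the new argmin wrapper and test configs exercise; the example shape is taken from the 'argmin' config above. The 'argmin_same_value' config additionally uses Genfunc.zeros so that every element ties for the minimum.

import torch

x = torch.randn(2, 256, 256)

# dim given, keepdim=False: the reduced dimension is dropped from the output shape.
assert torch.argmin(x, dim=1, keepdim=False).shape == (2, 256)

# dim given, keepdim=True: the reduced dimension is kept with size 1.
assert torch.argmin(x, dim=1, keepdim=True).shape == (2, 1, 256)

# dim=None: the input is flattened and a single 0-d index tensor is returned.
assert torch.argmin(x).dim() == 0

# Indices come back as int64, which is why the out tensor on the diopi_test side
# is built with an integer dtype (glob_vars.int_type) and functions.h documents
# out as type=[int32, int64].
assert torch.argmin(x, dim=1).dtype == torch.int64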