From b22403966cae0d983851acd549ac63fc948ca31b Mon Sep 17 00:00:00 2001 From: momo609 <963372609@qq.com> Date: Tue, 30 Jan 2024 21:08:51 +0800 Subject: [PATCH] fix --- .../csrc/pytorch/npu/chamfer_distance_npu.cpp | 31 +++++++++---------- mmcv/ops/csrc/pytorch/npu/common_util.h | 11 ++++--- mmcv/ops/csrc/pytorch/npu/focal_loss_npu.cpp | 4 +-- .../pytorch/npu/fused_bias_leakyrelu_npu.cpp | 2 +- .../csrc/pytorch/npu/gather_points_npu.cpp | 2 +- mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp | 2 +- .../pytorch/npu/three_interpolate_npu.cpp | 2 +- setup.py | 5 ++-- 8 files changed, 30 insertions(+), 29 deletions(-) diff --git a/mmcv/ops/csrc/pytorch/npu/chamfer_distance_npu.cpp b/mmcv/ops/csrc/pytorch/npu/chamfer_distance_npu.cpp index c22ffeaa80f..9d006a916c7 100644 --- a/mmcv/ops/csrc/pytorch/npu/chamfer_distance_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/chamfer_distance_npu.cpp @@ -5,11 +5,11 @@ using namespace NPU_NAME_SPACE; using namespace std; void chamfer_distance_forward_npu(Tensor XYZ1, Tensor XYZ2, Tensor dist1, - Tensor dist2, Tensor idx1, Tensor idx2) { + Tensor dist2, Tensor idx1, Tensor idx2) { at::Tensor xyz1 = at::ones_like(XYZ1); at::Tensor xyz2 = at::ones_like(XYZ2); - xyz1 = XYZ1.transpose(1,2).transpose(0,1); - xyz2 = XYZ2.transpose(1,2).transpose(0,1); + xyz1 = XYZ1.transpose(1, 2).transpose(0, 1); + xyz2 = XYZ2.transpose(1, 2).transpose(0, 1); OpCommand cmd; cmd.Name("ChamferDistance") .Input(xyz1) .Input(xyz2) .Output(dist1) .Output(dist2) .Output(idx1) .Output(idx2) .Run(); } -void chamfer_distance_backward_npu(Tensor xyz1, Tensor xyz2, - Tensor idx1, Tensor idx2, Tensor grad_dist1, Tensor grad_dist2, - Tensor grad_xyz1, Tensor grad_xyz2) { - EXEC_NPU_CMD(aclnnChamferDistanceBackward, xyz1, xyz2, idx1, idx2, - grad_dist1, grad_dist2, grad_xyz1, grad_xyz2); -} +void chamfer_distance_backward_npu(Tensor xyz1, Tensor xyz2, Tensor idx1, + Tensor idx2, Tensor grad_dist1, + Tensor grad_dist2, Tensor grad_xyz1, + Tensor grad_xyz2) { @@ -21,20 +21,21 @@ 
EXEC_NPU_CMD(aclnnChamferDistanceBackward, xyz1, xyz2, idx1, idx2, grad_dist1, + grad_dist2, grad_xyz1, grad_xyz2); +} void chamfer_distance_forward_impl(Tensor XYZ1, Tensor XYZ2, Tensor dist1, - Tensor dist2, Tensor idx1, Tensor idx2); -REGISTER_NPU_IMPL(chamfer_distance_forward_impl, - chamfer_distance_forward_npu); + Tensor dist2, Tensor idx1, Tensor idx2); +REGISTER_NPU_IMPL(chamfer_distance_forward_impl, chamfer_distance_forward_npu); -void chamfer_distance_backward_impl(Tensor xyz1, Tensor xyz2, Tensor idx1, Tensor idx2, - Tensor grad_dist1, Tensor grad_dist2, - Tensor grad_xyz1, Tensor grad_xyz2); +void chamfer_distance_backward_impl(Tensor xyz1, Tensor xyz2, Tensor idx1, + Tensor idx2, Tensor grad_dist1, + Tensor grad_dist2, Tensor grad_xyz1, + Tensor grad_xyz2); REGISTER_NPU_IMPL(chamfer_distance_backward_impl, chamfer_distance_backward_npu); diff --git a/mmcv/ops/csrc/pytorch/npu/common_util.h b/mmcv/ops/csrc/pytorch/npu/common_util.h index 5a303e87642..e6cf8879840 100644 --- a/mmcv/ops/csrc/pytorch/npu/common_util.h +++ b/mmcv/ops/csrc/pytorch/npu/common_util.h @@ -3,12 +3,11 @@ const int SIZE = 8; c10::SmallVector array_to_vector(c10::IntArrayRef shape) { - c10::SmallVector shape_small_vec; - for (uint64_t i = 0; i < shape.size(); i++) { - shape_small_vec.emplace_back(shape[i]); - } - - return shape_small_vec; + c10::SmallVector shape_small_vec; + for (uint64_t i = 0; i < shape.size(); i++) { + shape_small_vec.emplace_back(shape[i]); + } + return shape_small_vec; } #endif // MMCV_OPS_CSRC_COMMON__UTIL_HPP_ diff --git a/mmcv/ops/csrc/pytorch/npu/focal_loss_npu.cpp b/mmcv/ops/csrc/pytorch/npu/focal_loss_npu.cpp index b39e24f2df6..5030fed0e70 100644 --- a/mmcv/ops/csrc/pytorch/npu/focal_loss_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/focal_loss_npu.cpp @@ -102,11 +102,11 @@ void softmax_focal_loss_forward_npu(Tensor input, Tensor target, Tensor weight, at::IntArrayRef size_array = at::IntArrayRef(sizes); c10::SmallVector offsetVec; for (uint64_t i = 0; i < 
offset.size(); i++) { - offsetVec.emplace_back(offset[i]); + offsetVec.emplace_back(offset[i]); } c10::SmallVector sizeVec; for (uint64_t i = 0; i < size_array.size(); i++) { - sizeVec.emplace_back(size_array[i]); + sizeVec.emplace_back(size_array[i]); } OpCommand cmd2; cmd2.Name("Slice") diff --git a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp index 94cc95a391e..da278ca3c54 100644 --- a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp @@ -17,7 +17,7 @@ Tensor fused_bias_leakyrelu_npu(const Tensor &input, const Tensor &bias, int input_length = input_size.size(); c10::SmallVector input_size_tmp; for (uint64_t i = 0; i < input_size.size(); i++) { - input_size_tmp.emplace_back(input_size[i]); + input_size_tmp.emplace_back(input_size[i]); } if (input_length > 1) { for (int i = 0; i < input_length; i++) { diff --git a/mmcv/ops/csrc/pytorch/npu/gather_points_npu.cpp b/mmcv/ops/csrc/pytorch/npu/gather_points_npu.cpp index 35b5c00b73a..cf3a577ce1d 100644 --- a/mmcv/ops/csrc/pytorch/npu/gather_points_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/gather_points_npu.cpp @@ -35,7 +35,7 @@ void gather_points_backward_npu(int b, int c, int n, int npoints, auto shape = idx.sizes(); c10::SmallVector pad_size; for (uint64_t i = 0; i < shape.size(); i++) { - pad_size.emplace_back(shape[i]); + pad_size.emplace_back(shape[i]); } at::Tensor trans_grad_points = grad_points.transpose(1, 2).contiguous(); at::Tensor grad_points_view = trans_grad_points.view( diff --git a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp index 93d9c2b9cbb..0cddac8ed0b 100644 --- a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp @@ -43,7 +43,7 @@ void roi_align_backward_npu(Tensor grad_output, Tensor rois, Tensor argmax_y, auto shape = grad_input.sizes(); c10::SmallVector xdiff_shape; for (uint64_t 
i = 0; i < shape.size(); i++) { - xdiff_shape.emplace_back(shape[i]); + xdiff_shape.emplace_back(shape[i]); } OpCommand cmd; cmd.Name("ROIAlignGrad") diff --git a/mmcv/ops/csrc/pytorch/npu/three_interpolate_npu.cpp b/mmcv/ops/csrc/pytorch/npu/three_interpolate_npu.cpp index 07a5fed04bb..f9087554784 100644 --- a/mmcv/ops/csrc/pytorch/npu/three_interpolate_npu.cpp +++ b/mmcv/ops/csrc/pytorch/npu/three_interpolate_npu.cpp @@ -1,6 +1,6 @@ #include "pytorch_npu_helper.hpp" -#include "torch_npu/csrc/framework/utils/OpAdapter.h" #include "torch_npu/csrc/aten/NPUNativeFunctions.h" +#include "torch_npu/csrc/framework/utils/OpAdapter.h" using namespace NPU_NAME_SPACE; using namespace std; diff --git a/setup.py b/setup.py index 423ff443bab..1aba8450cd4 100644 --- a/setup.py +++ b/setup.py @@ -425,14 +425,15 @@ def get_mluops_version(file_path): elif (os.getenv('FORCE_NPU', '0') == '1'): print(f'Compiling {ext_name} only with CPU and NPU') try: - import imp + import importlib.util from torch_npu.utils.cpp_extension import NpuExtension extra_compile_args['cxx'] += [ '-D__FILENAME__=\"$$(notdir $$(abspath $$<))\"' ] extra_compile_args['cxx'] += [ - '-I' + imp.find_module('torch_npu')[1] + + '-I' + importlib.util.find_spec( + 'torch_npu').submodule_search_locations[0] + '/include/third_party/acl/inc' ] define_macros += [('MMCV_WITH_NPU', None)]