From 640aa03538e370d247fc71e3c2aa36c72a82f177 Mon Sep 17 00:00:00 2001 From: lzhangzz Date: Thu, 24 Feb 2022 20:08:44 +0800 Subject: [PATCH] Support Windows (#106) * minor changes * support windows * fix GCC build * fix lint * reformat * fix Windows build * fix GCC build * search backend ops for onnxruntime * fix lint * fix lint * code clean-up * code clean-up * fix clang build * fix trt support * fix cmake for ncnn * fix cmake for openvino * fix SDK Python API * handle ops for other backends (ncnn, trt) * handle SDK Python API library location * robustify linkage * fix cuda * minor fix for openvino & ncnn * use CMAKE_CUDA_ARCHITECTURES if set * fix cuda preprocessor * fix misc * fix pplnn & pplcv, drop support for pplcv<0.6.0 * robustify cmake * update build.md (#2) * build dynamic modules as module library & fix demo (partially) * fix candidate path for mmdeploy_python * move "enable CUDA" to cmake config for demo * refine demo cmake * add comment * fix ubuntu build * revert docs/en/build.md * fix C API * fix lint * Windows build doc (#3) * check in docs related to mmdeploy build on windows * update build guide on windows platform * update build guide on windows platform * make path of thirdparty libraries consistent * make path consistency * correct build command for custom ops * correct build command for sdk * update sdk build instructions * update doc * correct build command * fix lint * correct build command and fix lint Co-authored-by: lvhan * trailing whitespace (#4) * minor fix * fix sr sdk model * fix type deduction * fix cudaFree after driver shutting down * update ppl.cv installation warning (#5) * fix device allocator threshold & fix lint * update doc (#6) * update ppl.cv installation warning * missing 'git clone' Co-authored-by: chenxin Co-authored-by: zhangli Co-authored-by: lvhan028 Co-authored-by: lvhan --- .gitignore | 4 + CMakeLists.txt | 51 ++- cmake/MMDeploy.cmake | 151 ++++++++ cmake/MMDeployConfig.cmake.in | 17 +- cmake/common.cmake | 108 
------ cmake/cuda.cmake | 44 ++- cmake/loader.cpp.in | 39 ++ csrc/CMakeLists.txt | 1 + csrc/apis/c/CMakeLists.txt | 7 +- csrc/apis/c/classifier.cpp | 27 +- csrc/apis/c/classifier.h | 26 +- csrc/apis/c/common.h | 18 +- csrc/apis/c/detector.cpp | 27 +- csrc/apis/c/detector.h | 26 +- csrc/apis/c/handle.h | 4 +- csrc/apis/c/model.cpp | 14 +- csrc/apis/c/model.h | 14 +- csrc/apis/c/restorer.cpp | 8 +- csrc/apis/c/restorer.h | 24 +- csrc/apis/c/segmentor.cpp | 26 +- csrc/apis/c/segmentor.h | 24 +- csrc/apis/c/text_detector.cpp | 29 +- csrc/apis/c/text_detector.h | 29 +- csrc/apis/c/text_recognizer.cpp | 8 +- csrc/apis/c/text_recognizer.h | 35 +- csrc/apis/python/CMakeLists.txt | 11 +- csrc/archive/CMakeLists.txt | 1 - csrc/archive/json_archive.h | 2 +- csrc/archive/value_archive.h | 4 + csrc/backend_ops/CMakeLists.txt | 32 +- csrc/backend_ops/ncnn/CMakeLists.txt | 31 +- csrc/backend_ops/ncnn/ops/CMakeLists.txt | 26 +- csrc/backend_ops/ncnn/ops/ncnn_ops_register.h | 7 +- .../ncnn/pyncnn_ext/CMakeLists.txt | 5 +- csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp | 2 +- csrc/backend_ops/onnxruntime/CMakeLists.txt | 31 +- .../onnxruntime/common/onnxruntime_register.h | 5 +- .../onnxruntime/onnxruntime_register.cpp | 1 - csrc/backend_ops/tensorrt/CMakeLists.txt | 40 +-- csrc/codebase/CMakeLists.txt | 4 +- csrc/codebase/common.h | 8 +- csrc/codebase/mmcls/CMakeLists.txt | 6 +- csrc/codebase/mmcls/linear_cls.cpp | 11 +- csrc/codebase/mmcls/mmcls.cpp | 8 +- csrc/codebase/mmcls/mmcls.h | 7 +- csrc/codebase/mmdet/CMakeLists.txt | 9 +- csrc/codebase/mmdet/instance_segmentation.cpp | 32 +- csrc/codebase/mmdet/mmdet.cpp | 8 +- csrc/codebase/mmdet/mmdet.h | 10 +- csrc/codebase/mmdet/object_detection.cpp | 24 +- csrc/codebase/mmedit/CMakeLists.txt | 7 +- csrc/codebase/mmedit/mmedit.cpp | 8 +- csrc/codebase/mmedit/mmedit.h | 8 +- csrc/codebase/mmedit/restorer.cpp | 4 +- csrc/codebase/mmocr/CMakeLists.txt | 8 +- csrc/codebase/mmocr/crnn.cpp | 13 +- csrc/codebase/mmocr/dbnet.cpp | 18 +- 
csrc/codebase/mmocr/mmocr.cpp | 8 +- csrc/codebase/mmocr/mmocr.h | 8 +- csrc/codebase/mmocr/resize_ocr.cpp | 12 +- csrc/codebase/mmocr/warp.cpp | 2 +- csrc/codebase/mmseg/CMakeLists.txt | 7 +- csrc/codebase/mmseg/mmseg.cpp | 8 +- csrc/codebase/mmseg/mmseg.h | 8 +- csrc/codebase/mmseg/segment.cpp | 20 +- csrc/core/CMakeLists.txt | 17 +- csrc/core/device.h | 19 +- csrc/core/device_impl.cpp | 2 +- csrc/core/graph.cpp | 13 +- csrc/core/graph.h | 20 +- csrc/core/logger.cpp | 3 + csrc/core/logger.h | 20 +- csrc/core/macro.h | 117 +++++- csrc/core/mat.h | 2 +- csrc/core/model.cpp | 32 +- csrc/core/model.h | 9 +- csrc/core/module.cpp | 6 +- csrc/core/module.h | 5 +- csrc/core/net.cpp | 6 +- csrc/core/net.h | 2 + csrc/core/operator.cpp | 2 + csrc/core/operator.h | 25 +- csrc/core/registry.cpp | 46 +++ csrc/core/registry.h | 97 +++-- csrc/core/serialization.h | 64 +--- csrc/core/status_code.h | 5 +- csrc/core/tensor.cpp | 20 +- csrc/core/tensor.h | 2 +- csrc/core/utils/device_utils.h | 6 +- csrc/core/utils/filesystem.h | 15 + csrc/core/utils/formatter.h | 2 +- csrc/core/utils/source_location.h | 2 +- csrc/core/utils/stacktrace.h | 1 + csrc/core/value.h | 8 +- csrc/device/cpu/CMakeLists.txt | 12 +- csrc/device/cuda/CMakeLists.txt | 10 +- csrc/device/cuda/buddy_allocator.h | 13 +- csrc/device/cuda/cuda_builtin_kernels.cu | 6 +- csrc/device/cuda/cuda_device.cpp | 6 +- csrc/device/cuda/cuda_device.h | 10 + csrc/device/cuda/default_allocator.h | 14 +- csrc/device/cuda/linear_allocator.h | 6 +- csrc/device/device_allocator.h | 18 +- csrc/experimental/collection.h | 186 +++++----- csrc/experimental/module_adapter.h | 2 +- csrc/experimental/token.h | 140 ++++---- csrc/graph/CMakeLists.txt | 6 +- csrc/graph/common.cpp | 2 +- csrc/graph/common.h | 5 +- csrc/graph/flatten.cpp | 2 +- csrc/graph/inference.cpp | 4 +- csrc/graph/pipeline.cpp | 4 +- csrc/graph/task.cpp | 7 +- csrc/model/CMakeLists.txt | 15 +- csrc/model/directory_model_impl.cpp | 11 +- csrc/model/zip_model_impl.cpp | 31 +- 
csrc/net/CMakeLists.txt | 6 +- csrc/net/ncnn/CMakeLists.txt | 28 +- csrc/net/ncnn/ncnn_net.cpp | 5 +- csrc/net/net_module.cpp | 14 +- csrc/net/openvino/CMakeLists.txt | 6 +- csrc/net/openvino/openvino_net.cpp | 32 +- csrc/net/ort/CMakeLists.txt | 14 +- csrc/net/ort/ort_net.cpp | 25 +- csrc/net/ppl/CMakeLists.txt | 8 +- csrc/net/ppl/ppl_net.cpp | 25 +- csrc/net/trt/CMakeLists.txt | 16 +- csrc/net/trt/trt_net.cpp | 25 +- csrc/preprocess/CMakeLists.txt | 7 +- csrc/preprocess/cpu/CMakeLists.txt | 11 +- csrc/preprocess/cpu/pad_impl.cpp | 2 +- csrc/preprocess/cuda/CMakeLists.txt | 38 +- csrc/preprocess/cuda/crop_impl.cpp | 6 +- csrc/preprocess/cuda/load_impl.cpp | 8 +- csrc/preprocess/cuda/normalize.cu | 4 +- csrc/preprocess/cuda/normalize_impl.cpp | 6 +- csrc/preprocess/cuda/pad_impl.cpp | 18 +- csrc/preprocess/cuda/resize_impl.cpp | 20 +- csrc/preprocess/transform/CMakeLists.txt | 26 +- csrc/preprocess/transform/collect.cpp | 8 +- csrc/preprocess/transform/collect.h | 6 +- csrc/preprocess/transform/compose.cpp | 6 +- csrc/preprocess/transform/compose.h | 2 +- csrc/preprocess/transform/crop.cpp | 7 +- csrc/preprocess/transform/crop.h | 8 +- csrc/preprocess/transform/image2tensor.cpp | 7 +- csrc/preprocess/transform/image2tensor.h | 6 +- csrc/preprocess/transform/load.cpp | 9 +- csrc/preprocess/transform/load.h | 6 +- csrc/preprocess/transform/normalize.cpp | 14 +- csrc/preprocess/transform/normalize.h | 6 +- csrc/preprocess/transform/pad.cpp | 10 +- csrc/preprocess/transform/pad.h | 9 +- csrc/preprocess/transform/resize.cpp | 18 +- csrc/preprocess/transform/resize.h | 9 +- csrc/preprocess/transform/transform.cpp | 2 + csrc/preprocess/transform/transform.h | 31 +- csrc/preprocess/transform_module.cpp | 11 +- csrc/utils/CMakeLists.txt | 3 + csrc/utils/opencv/CMakeLists.txt | 17 + .../cpu => utils/opencv}/opencv_utils.cpp | 20 +- .../cpu => utils/opencv}/opencv_utils.h | 38 +- demo/csrc/CMakeLists.txt | 13 +- demo/csrc/image_classification.cpp | 2 +- 
demo/csrc/image_restorer.cpp | 2 +- demo/csrc/image_segmentation.cpp | 4 +- demo/csrc/object_detection.cpp | 2 +- demo/csrc/ocr.cpp | 2 +- docs/en/build/linux.md | 1 + docs/en/build/windows.md | 1 + docs/zh_cn/build/linux.md | 1 + docs/zh_cn/build/windows.md | 336 ++++++++++++++++++ mmdeploy/backend/ncnn/init_plugins.py | 27 +- mmdeploy/backend/onnxruntime/init_plugins.py | 16 +- mmdeploy/backend/sdk/__init__.py | 20 +- mmdeploy/backend/tensorrt/init_plugins.py | 16 +- .../mmedit/deploy/super_resolution_model.py | 6 +- mmdeploy/utils/__init__.py | 4 +- mmdeploy/utils/utils.py | 20 ++ tests/test_csrc/CMakeLists.txt | 95 ++--- .../test_csrc/archive/test_value_archive.cpp | 14 +- tests/test_csrc/capi/test_classifier.cpp | 10 +- tests/test_csrc/capi/test_detector.cpp | 21 +- tests/test_csrc/capi/test_model.cpp | 2 +- tests/test_csrc/capi/test_restorer.cpp | 4 +- tests/test_csrc/capi/test_segmentor.cpp | 4 +- tests/test_csrc/capi/test_text_detector.cpp | 10 +- tests/test_csrc/capi/test_text_recognizer.cpp | 16 +- tests/test_csrc/core/test_mat.cpp | 2 + tests/test_csrc/core/test_status_code.cpp | 4 +- tests/test_csrc/core/test_token.cpp | 36 -- tests/test_csrc/core/test_value.cpp | 4 +- tests/test_csrc/device/test_cpu_device.cpp | 87 ----- .../test_csrc/model/test_directory_model.cpp | 8 +- tests/test_csrc/model/test_model.cpp | 5 +- tests/test_csrc/model/test_zip_model.cpp | 12 +- tests/test_csrc/net/test_ncnn_net.cpp | 2 +- tests/test_csrc/net/test_openvino_net.cpp | 2 +- tests/test_csrc/net/test_ort_net.cpp | 2 +- tests/test_csrc/net/test_ppl_net.cpp | 2 +- tests/test_csrc/net/test_trt_net.cpp | 2 +- tests/test_csrc/preprocess/test_compose.cpp | 2 +- tests/test_csrc/preprocess/test_crop.cpp | 2 +- .../preprocess/test_image2tensor.cpp | 2 +- tests/test_csrc/preprocess/test_load.cpp | 2 +- tests/test_csrc/preprocess/test_normalize.cpp | 2 +- tests/test_csrc/preprocess/test_pad.cpp | 2 +- tests/test_csrc/preprocess/test_resize.cpp | 2 +- tests/test_csrc/test_resource.h | 
43 +-- 209 files changed, 2208 insertions(+), 1572 deletions(-) create mode 100644 cmake/MMDeploy.cmake delete mode 100644 cmake/common.cmake create mode 100644 cmake/loader.cpp.in create mode 100644 csrc/core/registry.cpp create mode 100644 csrc/core/utils/filesystem.h create mode 100644 csrc/utils/CMakeLists.txt create mode 100644 csrc/utils/opencv/CMakeLists.txt rename csrc/{preprocess/cpu => utils/opencv}/opencv_utils.cpp (91%) rename csrc/{preprocess/cpu => utils/opencv}/opencv_utils.h (69%) create mode 100644 docs/en/build/linux.md create mode 100644 docs/en/build/windows.md create mode 100644 docs/zh_cn/build/linux.md create mode 100644 docs/zh_cn/build/windows.md delete mode 100644 tests/test_csrc/core/test_token.cpp diff --git a/.gitignore b/.gitignore index 09a967768..e5d951cc2 100644 --- a/.gitignore +++ b/.gitignore @@ -131,3 +131,7 @@ work_dirs/ # the generated header files /tests/test_csrc/test_define.h + +# +!docs/zh_cn/build +!docs/en/build diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f8f8e467..3ae98d745 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,7 +11,11 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +if (MSVC) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +else () + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +endif () set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # options @@ -39,12 +43,20 @@ endif () # notice that ubsan has linker issues for ubuntu < 18.04, see # https://stackoverflow.com/questions/50024731/ld-unrecognized-option-push-state-no-as-needed if (MMDEPLOY_UBSAN_ENABLE) - add_compile_options($<$:-fsanitize=undefined>) - add_link_options(-fsanitize=undefined) + add_compile_options($<$:-fsanitize=undefined>) + add_link_options(-fsanitize=undefined) +endif () + +if (MSVC) + add_compile_options($<$:/diagnostics:classic>) + add_compile_options($<$:/Zc:preprocessor>) # 
/experimental:preprocessor on VS2017 + add_compile_options($<$:/wd4251>) +else () + add_compile_options($<$:-fvisibility=hidden>) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -# set INTERFACE target to gather linked modules +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + add_library(MMDeployStaticModules INTERFACE) add_library(MMDeployDynamicModules INTERFACE) add_library(MMDeployLibs INTERFACE) @@ -52,25 +64,10 @@ add_library(MMDeployLibs INTERFACE) add_subdirectory(csrc) if (MMDEPLOY_BUILD_SDK) - # get static modules and dynamic modules from ${MMDeployStaticModules} and ${MMDeployDynamicModules}, respectively - set(STATIC_MODULES) - get_target_property(STATIC_MODULES MMDeployStaticModules INTERFACE_LINK_LIBRARIES) - get_target_list("${STATIC_MODULES}" FILTERED_MODULES) - set(MMDEPLOY_STATIC_MODULES "${FILTERED_MODULES}" CACHE STRING "MMDeploy's static modules") - message(STATUS "MMDEPLOY_STATIC_MODULES: ${MMDEPLOY_STATIC_MODULES}") - - set(DYNAMIC_MODULES) - get_target_property(DYNAMIC_MODULES MMDeployDynamicModules INTERFACE_LINK_LIBRARIES) - get_target_list("${DYNAMIC_MODULES}" FILTERED_MODULES) - set(MMDEPLOY_DYNAMIC_MODULES "${FILTERED_MODULES}" CACHE STRING "MMDeploy's dynamic modules") - message(STATUS "MMDEPLOY_DYNAMIC_MODULES: ${MMDEPLOY_DYNAMIC_MODULES}") - - # get libs from ${MMDeployLibs} - set(LIBS) - get_target_property(LIBS MMDeployLibs INTERFACE_LINK_LIBRARIES) - get_target_list("${LIBS}" FILTERED_LIBS) - set(MMDEPLOY_LIBS "${FILTERED_LIBS}" CACHE STRING "MMDeploy's libs that can be linked directly by application") - message(STATUS "MMDEPLOY_LIBS: ${MMDEPLOY_LIBS}") + install(TARGETS MMDeployStaticModules + MMDeployDynamicModules + MMDeployLibs + EXPORT MMDeployTargets) if (MMDEPLOY_BUILD_TEST) add_subdirectory(tests/test_csrc) @@ -78,13 +75,11 @@ if (MMDEPLOY_BUILD_SDK) if (MMDEPLOY_BUILD_SDK_PYTHON_API) add_subdirectory(csrc/apis/python) - endif() + endif () # export MMDeploy package install(EXPORT MMDeployTargets - # NAMESPACE 
mmdeploy:: FILE MMDeployTargets.cmake - #EXPORT_LINK_INTERFACE_LIBRARIES DESTINATION lib/cmake/MMDeploy) include(CMakePackageConfigHelpers) @@ -105,6 +100,8 @@ if (MMDEPLOY_BUILD_SDK) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfigVersion.cmake + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/MMDeploy.cmake + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/loader.cpp.in DESTINATION lib/cmake/MMDeploy ) diff --git a/cmake/MMDeploy.cmake b/cmake/MMDeploy.cmake new file mode 100644 index 000000000..086b45681 --- /dev/null +++ b/cmake/MMDeploy.cmake @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +function (mmdeploy_export NAME) + set(_LIB_DIR lib) + if (MSVC) + set(_LIB_DIR bin) + endif () + install(TARGETS ${NAME} + EXPORT MMDeployTargets + ARCHIVE DESTINATION lib + LIBRARY DESTINATION ${_LIB_DIR} + RUNTIME DESTINATION bin) +endfunction () + + +function (mmdeploy_add_library NAME) + cmake_parse_arguments(_MMDEPLOY "EXCLUDE" "" "" ${ARGN}) + add_library(${NAME} ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + target_compile_definitions(${NAME} PRIVATE -DMMDEPLOY_API_EXPORTS=1) + get_target_property(_TYPE ${NAME} TYPE) + if (_TYPE STREQUAL STATIC_LIBRARY) + set_target_properties(${NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) + elseif (_TYPE STREQUAL SHARED_LIBRARY) + else () + message(FATAL_ERROR "unsupported type: ${_TYPE}") + endif () + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployLibs INTERFACE ${NAME}) + mmdeploy_export(${NAME}) + endif () +endfunction () + + +function (mmdeploy_add_module NAME) + # EXCLUDE: exclude from registering & exporting as SDK module + # LIBRARY: the module is also a library (add_libray with SHARED instead of MODULE) + cmake_parse_arguments(_MMDEPLOY "EXCLUDE;LIBRARY" "" "" ${ARGN}) + # search for add_library keywords + cmake_parse_arguments(_KW "STATIC;SHARED;MODULE" "" "" ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + + set(_MAYBE_MODULE) + # no library type specified + if (NOT (_KW_STATIC OR 
_KW_SHARED OR _KW_MODULE)) + # shared but not marked as a library, build module library so that no .lib dependency + # will be generated for MSVC + if (MSVC AND BUILD_SHARED_LIBS AND NOT _MMDEPLOY_LIBRARY) + set(_MAYBE_MODULE MODULE) + endif () + endif () + + add_library(${NAME} ${_MAYBE_MODULE} ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + + # automatically link mmdeploy::core if exists + if (TARGET mmdeploy::core) + target_link_libraries(${NAME} PRIVATE mmdeploy::core) + endif () + + # export public symbols when marked as a library + if (_MMDEPLOY_LIBRARY) + target_compile_definitions(${NAME} PRIVATE -DMMDEPLOY_API_EXPORTS=1) + endif () + + get_target_property(_TYPE ${NAME} TYPE) + if (_TYPE STREQUAL STATIC_LIBRARY) + set_target_properties(${NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) + if (MSVC) + target_link_options(${NAME} INTERFACE "/WHOLEARCHIVE:${NAME}") + endif () + # register static modules + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployStaticModules INTERFACE ${NAME}) + endif () + elseif (_TYPE STREQUAL SHARED_LIBRARY OR _TYPE STREQUAL MODULE_LIBRARY) + # register dynamic modules + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployDynamicModules INTERFACE ${NAME}) + endif () + else () + message(FATAL_ERROR "unsupported type: ${_TYPE}") + endif () + if (NOT _MMDEPLOY_EXCLUDE) + mmdeploy_export(${NAME}) + endif () +endfunction () + + +function (_mmdeploy_flatten_modules RETVAL) + set(_RETVAL) + foreach (ARG IN LISTS ARGN) + get_target_property(TYPE ${ARG} TYPE) + if (TYPE STREQUAL "INTERFACE_LIBRARY") + get_target_property(LIBS ${ARG} INTERFACE_LINK_LIBRARIES) + if (LIBS) + # pattern for 3.17+ + list(FILTER LIBS EXCLUDE REGEX "^::@") + # pattern for 3.13-3.16 + list(TRANSFORM LIBS REPLACE "(.+)::@.*" "\\1") + list(APPEND _RETVAL ${LIBS}) + endif () + else () + list(APPEND _RETVAL ${ARG}) + endif () + endforeach () + set(${RETVAL} ${_RETVAL} PARENT_SCOPE) +endfunction () + + +function (mmdeploy_load_static NAME) + if (MSVC) + 
target_link_libraries(${NAME} PRIVATE ${ARGN}) + else () + _mmdeploy_flatten_modules(_MODULE_LIST ${ARGN}) + target_link_libraries(${NAME} PRIVATE + -Wl,--whole-archive + ${_MODULE_LIST} + -Wl,--no-whole-archive) + endif () +endfunction () + +function (mmdeploy_load_dynamic NAME) + _mmdeploy_flatten_modules(_MODULE_LIST ${ARGN}) + if (MSVC) + if (NOT _MODULE_LIST) + return () + endif () + # MSVC has nothing like "-Wl,--no-as-needed ... -Wl,--as-needed", as a + # workaround we build a static module which loads the dynamic modules + set(_MODULE_STR ${_MODULE_LIST}) + list(TRANSFORM _MODULE_STR REPLACE "(.+)" "\"\\1\"") + string(JOIN ",\n " _MODULE_STR ${_MODULE_STR}) + set(_MMDEPLOY_DYNAMIC_MODULES ${_MODULE_STR}) + + set(_LOADER_NAME ${NAME}_loader) + + add_dependencies(${NAME} ${_MODULE_LIST}) + + set(_LOADER_PATH ${CMAKE_BINARY_DIR}/${_LOADER_NAME}.cpp) + # ! CMAKE_CURRENT_FUNCTION_LIST_DIR requires cmake 3.17+ + configure_file( + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/loader.cpp.in + ${_LOADER_PATH}) + + mmdeploy_add_module(${_LOADER_NAME} STATIC EXCLUDE ${_LOADER_PATH}) + mmdeploy_load_static(${NAME} ${_LOADER_NAME}) + else () + target_link_libraries(${NAME} PRIVATE + -Wl,--no-as-needed + ${_MODULE_LIST} + -Wl,--as-needed) + endif () +endfunction () diff --git a/cmake/MMDeployConfig.cmake.in b/cmake/MMDeployConfig.cmake.in index 7a23a9e15..4bd05489e 100644 --- a/cmake/MMDeployConfig.cmake.in +++ b/cmake/MMDeployConfig.cmake.in @@ -2,23 +2,26 @@ cmake_minimum_required(VERSION 3.14) -include ("${CMAKE_CURRENT_LIST_DIR}/MMDeployTargets.cmake") +include("${CMAKE_CURRENT_LIST_DIR}/MMDeployTargets.cmake") set(MMDEPLOY_CODEBASES @MMDEPLOY_CODEBASES@) set(MMDEPLOY_TARGET_DEVICES @MMDEPLOY_TARGET_DEVICES@) set(MMDEPLOY_TARGET_BACKENDS @MMDEPLOY_TARGET_BACKENDS@) set(MMDEPLOY_BUILD_TYPE @CMAKE_BUILD_TYPE@) -set(MMDEPLOY_STATIC_MODULES @MMDEPLOY_STATIC_MODULES@) -set(MMDEPLOY_DYNAMIC_MODULES @MMDEPLOY_DYNAMIC_MODULES@) set(MMDEPLOY_BUILD_SHARED @BUILD_SHARED_LIBS@) 
-set(MMDEPLOY_LIBS @MMDEPLOY_LIBS@) if (NOT MMDEPLOY_BUILD_SHARED) if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) + set(CMAKE_CUDA_RUNTIME_LIBRARY Shared) + enable_language(CUDA) find_package(pplcv REQUIRED) endif () endif () -set(MMDeploy_LIBS ${MMDEPLOY_LIBS} - -Wl,--no-as-needed ${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-needed - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive) +find_package(spdlog REQUIRED) +find_package(OpenCV REQUIRED) + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +include("${CMAKE_CURRENT_LIST_DIR}/MMDeploy.cmake") diff --git a/cmake/common.cmake b/cmake/common.cmake deleted file mode 100644 index fae162bfe..000000000 --- a/cmake/common.cmake +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -function(set_targets PROJECT_NAME OBJ_TARGET STATIC_TARGET SHARED_TARGET) - set(${OBJ_TARGET} ${PROJECT_NAME}_obj PARENT_SCOPE) - set(${STATIC_TARGET} ${PROJECT_NAME}_static PARENT_SCOPE) - set(${SHARED_TARGET} ${PROJECT_NAME} PARENT_SCOPE) -endfunction() - -function(install_targets TARGET_NAMES) - foreach (TARGET_NAME ${TARGET_NAMES}) - install(TARGETS ${TARGET_NAME} - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - RUNTIME DESTINATION bin - ) - endforeach () -endfunction() - -function(build_target TARGET_NAME TARGET_SRCS) - add_library(${TARGET_NAME} ${TARGET_SRCS}) - set_target_properties(${TARGET_NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) -endfunction() - -# When the object target ${TARGET_NAME} has more than one source file, -# "${SRCS_VARIABLE}" MUST be passed to ${TARGET_SRCS}. The quotation marks CANNOT be dismissed. 
-function(build_object_target TARGET_NAME TARGET_SRCS) - add_library(${TARGET_NAME} OBJECT) - target_sources(${TARGET_NAME} PRIVATE ${TARGET_SRCS}) - set_target_properties(${TARGET_NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) -endfunction() - -function(build_static_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} STATIC $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} ${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - -function(build_shared_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} SHARED $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} ${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - -function(build_module_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} MODULE $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} 
${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - - -function(export_target TARGET_NAME) - target_link_libraries(MMDeployLibs INTERFACE ${TARGET_NAME}) - install(TARGETS ${TARGET_NAME} - EXPORT MMDeployTargets - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - ) -endfunction() - -function(export_module TARGET_NAME) - get_target_property(TARGET_TYPE ${TARGET_NAME} TYPE) - if (${TARGET_TYPE} STREQUAL "STATIC_LIBRARY") - target_link_libraries(MMDeployStaticModules INTERFACE ${TARGET_NAME}) - elseif (${TARGET_TYPE} STREQUAL "SHARED_LIBRARY") - target_link_libraries(MMDeployDynamicModules INTERFACE ${TARGET_NAME}) - endif () - install(TARGETS ${TARGET_NAME} - EXPORT MMDeployTargets - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - ) -endfunction() - -function(get_target_list INPUT_TARGETS OUTPUT_TARGETS) - set(FILTERED_TARGETS) - foreach (INPUT_TARGET IN LISTS INPUT_TARGETS) - if (TARGET ${INPUT_TARGET}) - list(APPEND FILTERED_TARGETS ${INPUT_TARGET}) - endif() - endforeach () - set(${OUTPUT_TARGETS} "${FILTERED_TARGETS}" PARENT_SCOPE) -endfunction() diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index 158e542e1..9fe42596c 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -23,35 +23,41 @@ else () set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER}) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=-fPIC,-Wall,-fvisibility=hidden") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=-fno-gnu-unique") + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU") + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=-fno-gnu-unique") + endif () endif () enable_language(CUDA) # set virtual compute architecture and real ones set(_NVCC_FLAGS) -set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52") -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61") -endif () 
-if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70") -endif () -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75") -endif () -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80") - if (CUDA_VERSION_MINOR VERSION_GREATER_EQUAL "1") - # cuda doesn't support `sm_86` until version 11.1 - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86") +if (NOT CMAKE_CUDA_ARCHITECTURES) + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52") + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80") + if (CUDA_VERSION_MINOR VERSION_GREATER_EQUAL "1") + # cuda doesn't support `sm_86` until version 11.1 + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86") + endif () endif () endif () set(CUDA_NVCC_FLAGS_DEBUG "-g -O0") set(CUDA_NVCC_FLAGS_RELEASE "-O3") set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") -set(CMAKE_CUDA_STANDARD 14) +if (NOT MSVC) + set(CMAKE_CUDA_STANDARD 14) +endif () set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_NVCC_FLAGS} ${_NVCC_FLAGS}") diff --git a/cmake/loader.cpp.in b/cmake/loader.cpp.in new file mode 100644 index 
000000000..6627d6e2e --- /dev/null +++ b/cmake/loader.cpp.in @@ -0,0 +1,39 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#include + +#include + +namespace mmdeploy { +namespace { + +void* mmdeploy_load_library(const char* name) { + fprintf(stderr, "loading %s ...\n", name); + auto handle = LoadLibraryA(name); + if (!handle) { + fprintf(stderr, "failed to load library %s\n", name); + return nullptr; + } + return handle; +} + +// clang-format off + +class Loader { + public: + Loader() { + const char* modules[] = { + @_MMDEPLOY_DYNAMIC_MODULES@ + }; + for (const auto name : modules) { + mmdeploy_load_library(name); + } + } +}; + +// clang-format on + +static Loader loader; + +} // namespace +} // namespace mmdeploy diff --git a/csrc/CMakeLists.txt b/csrc/CMakeLists.txt index 889e54bb5..b14c81c13 100644 --- a/csrc/CMakeLists.txt +++ b/csrc/CMakeLists.txt @@ -4,6 +4,7 @@ add_subdirectory(backend_ops) if (MMDEPLOY_BUILD_SDK) add_subdirectory(core) + add_subdirectory(utils) add_subdirectory(archive) add_subdirectory(device) add_subdirectory(graph) diff --git a/csrc/apis/c/CMakeLists.txt b/csrc/apis/c/CMakeLists.txt index 81da0a3fb..f1809995b 100644 --- a/csrc/apis/c/CMakeLists.txt +++ b/csrc/apis/c/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(capis) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) if ("all" IN_LIST MMDEPLOY_CODEBASES) set(TASK_LIST "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model") @@ -28,16 +28,13 @@ endif () foreach (TASK ${TASK_LIST}) set(TARGET_NAME mmdeploy_${TASK}) - build_target(${TARGET_NAME} ${TASK}.cpp) + mmdeploy_add_library(${TARGET_NAME} ${TASK}.cpp) target_link_libraries(${TARGET_NAME} PRIVATE mmdeploy::core) target_include_directories(${TARGET_NAME} PUBLIC $ $) - export_target(${TARGET_NAME}) - install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h DESTINATION include/c) - endforeach () install(FILES 
${CMAKE_CURRENT_SOURCE_DIR}/common.h diff --git a/csrc/apis/c/classifier.cpp b/csrc/apis/c/classifier.cpp index 9236f5eae..ecdfaafc8 100644 --- a/csrc/apis/c/classifier.cpp +++ b/csrc/apis/c/classifier.cpp @@ -55,28 +55,28 @@ int mmdeploy_classifier_create_impl(ModelType&& m, const char* device_name, int return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_classifier_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_classifier_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_class_t** results, int** result_count) { +int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_class_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -92,7 +92,7 @@ MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat } auto output = classifier->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto classify_outputs = from_value>(output); @@ -124,20 +124,19 @@ MM_SDK_API int 
mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, - int count) { +void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, int count) { delete[] results; delete[] result_count; } -MM_SDK_API void mmdeploy_classifier_destroy(mm_handle_t handle) { +void mmdeploy_classifier_destroy(mm_handle_t handle) { if (handle != nullptr) { auto classifier = static_cast(handle); delete classifier; diff --git a/csrc/apis/c/classifier.h b/csrc/apis/c/classifier.h index 6834b8e40..a2209792b 100644 --- a/csrc/apis/c/classifier.h +++ b/csrc/apis/c/classifier.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_class_t { int label_id; float score; @@ -25,8 +29,8 @@ typedef struct mm_class_t { * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ -MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create classifier's handle @@ -37,8 +41,8 @@ MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_n * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ -MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** 
* @brief Use classifier created by \ref mmdeploy_classifier_create_by_path to get label @@ -53,8 +57,8 @@ MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const * mmdeploy_classifier_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_class_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_class_t** results, int** result_count); /** * @brief Release the inference result buffer created \ref mmdeploy_classifier_apply @@ -62,13 +66,17 @@ MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MM_SDK_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, + int count); /** * @brief Destroy classifier's handle * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path */ -MM_SDK_API void mmdeploy_classifier_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_CLASSIFIER_H diff --git a/csrc/apis/c/common.h b/csrc/apis/c/common.h index 1809f7772..dc82d4429 100644 --- a/csrc/apis/c/common.h +++ b/csrc/apis/c/common.h @@ -3,9 +3,23 @@ #ifndef MMDEPLOY_COMMON_H #define MMDEPLOY_COMMON_H -#include +#include -#define MM_SDK_API +#ifndef MMDEPLOY_EXPORT +#ifdef _MSC_VER +#define MMDEPLOY_EXPORT __declspec(dllexport) +#else +#define MMDEPLOY_EXPORT __attribute__((visibility("default"))) +#endif +#endif + +#ifndef MMDEPLOY_API +#ifdef MMDEPLOY_API_EXPORTS +#define MMDEPLOY_API MMDEPLOY_EXPORT +#else +#define MMDEPLOY_API +#endif +#endif // clang-format off diff --git 
a/csrc/apis/c/detector.cpp b/csrc/apis/c/detector.cpp index 190b8bf7d..4dbb573f9 100644 --- a/csrc/apis/c/detector.cpp +++ b/csrc/apis/c/detector.cpp @@ -55,27 +55,27 @@ int mmdeploy_detector_create_impl(ModelType&& m, const char* device_name, int de return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_detector_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_detector_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count) { +int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_detect_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -91,7 +91,7 @@ MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, } auto output = detector->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto detector_outputs = from_value>(output); @@ -142,15 +142,14 @@ MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, return MM_SUCCESS; } catch (const 
std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, - int count) { +void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, int count) { auto result_ptr = results; for (int i = 0; i < count; ++i) { for (int j = 0; j < result_count[i]; ++j, ++result_ptr) { @@ -164,7 +163,7 @@ MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int delete[] result_count; } -MM_SDK_API void mmdeploy_detector_destroy(mm_handle_t handle) { +void mmdeploy_detector_destroy(mm_handle_t handle) { if (handle != nullptr) { auto detector = static_cast(handle); delete detector; diff --git a/csrc/apis/c/detector.h b/csrc/apis/c/detector.h index 59689dd0d..bfcf0a8ac 100644 --- a/csrc/apis/c/detector.h +++ b/csrc/apis/c/detector.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_instance_mask_t { char* data; int height; @@ -32,8 +36,8 @@ typedef struct mm_detect_t { * @param[out] handle instance of a detector * @return status of creating detector's handle */ -MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create detector's handle @@ -43,8 +47,8 @@ MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_nam * @param[out] handle instance of a detector * @return status of creating detector's handle */ -MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int 
mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply detector to batch images and get their inference results @@ -58,21 +62,25 @@ MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const ch * mmdeploy_detector_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_detect_t** results, int** result_count); /** @brief Release the inference result buffer created by \ref mmdeploy_detector_apply * @param[in] results detection results buffer * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, + int count); /** * @brief Destroy detector's handle * @param[in] handle detector's handle created by \ref mmdeploy_detector_create_by_path */ -MM_SDK_API void mmdeploy_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_detector_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_DETECTOR_H diff --git a/csrc/apis/c/handle.h b/csrc/apis/c/handle.h index 2af9c0da5..422345299 100644 --- a/csrc/apis/c/handle.h +++ b/csrc/apis/c/handle.h @@ -20,12 +20,12 @@ class Handle { config["context"].update({{"device", device_}, {"stream", stream_}}); auto creator = Registry::Get().GetCreator("Pipeline"); if (!creator) { - ERROR("failed to find Pipeline creator"); + MMDEPLOY_ERROR("failed to find Pipeline creator"); throw_exception(eEntryNotFound); } pipeline_ = creator->Create(config); if (!pipeline_) { - ERROR("create pipeline failed"); + MMDEPLOY_ERROR("create 
pipeline failed"); throw_exception(eFail); } pipeline_->Build(graph_); diff --git a/csrc/apis/c/model.cpp b/csrc/apis/c/model.cpp index 9834071c7..5101b92a4 100644 --- a/csrc/apis/c/model.cpp +++ b/csrc/apis/c/model.cpp @@ -1,11 +1,13 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "core/model.h" +// clang-format off +#include "model.h" #include #include "core/logger.h" -#include "model.h" +#include "core/model.h" +// clang-format on using namespace mmdeploy; @@ -15,9 +17,9 @@ int mmdeploy_model_create_by_path(const char *path, mm_model_t *model) { *model = ptr.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("failed to create model: {}", e.what()); + MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -28,9 +30,9 @@ int mmdeploy_model_create(const void *buffer, int size, mm_model_t *model) { *model = ptr.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("failed to create model: {}", e.what()); + MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/model.h b/csrc/apis/c/model.h index 731bb0270..6151ba43a 100644 --- a/csrc/apis/c/model.h +++ b/csrc/apis/c/model.h @@ -10,13 +10,17 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + /** * @brief Create SDK Model instance from given model path * @param[in] path model path * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model); /** * @brief Create SDK Model instance from memory @@ -25,13 +29,17 @@ MM_SDK_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_model_create(const void* buffer, int size, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create(const void* buffer, int size, mm_model_t* model); /** * @brief Destroy model instance * @param[in] model sdk model instance created by \ref mmdeploy_model_create_by_path or \ref * mmdeploy_model_create */ -MM_SDK_API void mmdeploy_model_destroy(mm_model_t model); +MMDEPLOY_API void mmdeploy_model_destroy(mm_model_t model); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_MODEL_H_ diff --git a/csrc/apis/c/restorer.cpp b/csrc/apis/c/restorer.cpp index 0e12fa02c..95e367958 100644 --- a/csrc/apis/c/restorer.cpp +++ b/csrc/apis/c/restorer.cpp @@ -51,9 +51,9 @@ int mmdeploy_restorer_create_impl(ModelType &&m, const char *device_name, int de return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -105,9 +105,9 @@ int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t *images, int coun *results = _results.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/restorer.h b/csrc/apis/c/restorer.h index 5d568cd41..4ae491a23 100644 --- a/csrc/apis/c/restorer.h +++ b/csrc/apis/c/restorer.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + /** * @brief Create a restorer instance * @param[in] model an instance of image restoration model created by @@ -20,8 +24,8 @@ * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create a restorer instance @@ -32,8 +36,8 @@ MM_SDK_API int mmdeploy_restorer_create(mm_model_t model, const char* device_nam * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply restorer to a batch of images @@ -44,19 +48,23 @@ MM_SDK_API int mmdeploy_restorer_create_by_path(const char* model_path, const ch * by \ref mmdeploy_restorer_release_result * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_apply(mm_handle_t handle, const 
mm_mat_t* images, int count, - mm_mat_t** results); +MMDEPLOY_API int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t* images, int count, + mm_mat_t** results); /** @brief Release result buffer returned by \ref mmdeploy_restorer_apply * @param[in] results result buffer by restorer * @param[in] count length of \p result */ -MM_SDK_API void mmdeploy_restorer_release_result(mm_mat_t* results, int count); +MMDEPLOY_API void mmdeploy_restorer_release_result(mm_mat_t* results, int count); /** * @brief destroy restorer * @param[in] handle handle of restorer created by \ref mmdeploy_restorer_create_by_path */ -MM_SDK_API void mmdeploy_restorer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_restorer_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_RESTORER_H_ diff --git a/csrc/apis/c/segmentor.cpp b/csrc/apis/c/segmentor.cpp index 2c578de32..bcdca722a 100644 --- a/csrc/apis/c/segmentor.cpp +++ b/csrc/apis/c/segmentor.cpp @@ -53,28 +53,28 @@ int mmdeploy_segmentor_create_impl(ModelType&& m, const char* device_name, int d return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_segmentor_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_segmentor_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results) { +int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_segment_t** results) { if (handle == nullptr || mats == nullptr || mat_count == 0 || results == nullptr) { return MM_E_INVALID_ARG; } @@ -97,7 +97,7 @@ MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats auto results_ptr = _results.get(); for (auto i = 0; i < mat_count; ++i, ++results_ptr) { auto& output_item = output[i]; - DEBUG("the {}-th item in output: {}", i, output_item); + MMDEPLOY_DEBUG("the {}-th item in output: {}", i, output_item); auto segmentor_output = from_value(output_item); results_ptr->height = segmentor_output.height; results_ptr->width = segmentor_output.width; @@ -110,14 +110,14 @@ MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { +void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { if (results == nullptr) { return; } @@ -128,7 +128,7 @@ MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int cou delete[] results; } -MM_SDK_API void mmdeploy_segmentor_destroy(mm_handle_t handle) { +void mmdeploy_segmentor_destroy(mm_handle_t handle) { if (handle != nullptr) { auto segmentor = static_cast(handle); delete segmentor; diff --git a/csrc/apis/c/segmentor.h b/csrc/apis/c/segmentor.h index 4abcd3cf6..741fbd963 100644 --- a/csrc/apis/c/segmentor.h +++ b/csrc/apis/c/segmentor.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_segment_t { int height; ///< height of \p mask that equals to the input image's height int width; ///< width of \p mask that equals to the input image's width @@ -28,8 +32,8 @@ typedef struct mm_segment_t { * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ -MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create segmentor's handle @@ -40,8 +44,8 @@ MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_na * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ -MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply segmentor to batch images and get their 
inference results @@ -53,19 +57,23 @@ MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const c * image. It must be released by \ref mmdeploy_segmentor_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results); +MMDEPLOY_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_segment_t** results); /** @brief Release result buffer returned by \ref mmdeploy_segmentor_apply * @param[in] results result buffer * @param[in] count length of \p results */ -MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count); +MMDEPLOY_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count); /** * @brief Destroy segmentor's handle * @param[in] handle segmentor's handle created by \ref mmdeploy_segmentor_create_by_path */ -MM_SDK_API void mmdeploy_segmentor_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_segmentor_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SEGMENTOR_H diff --git a/csrc/apis/c/text_detector.cpp b/csrc/apis/c/text_detector.cpp index 1f0479f30..6369044bf 100644 --- a/csrc/apis/c/text_detector.cpp +++ b/csrc/apis/c/text_detector.cpp @@ -53,29 +53,28 @@ int mmdeploy_text_detector_create_impl(ModelType&& m, const char* device_name, i return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_text_detector_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_text_detector_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_text_detector_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_text_detect_t** results, int** result_count) { +int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_text_detect_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -91,7 +90,7 @@ MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* } auto output = text_detector->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto detector_outputs = from_value>(output); vector _result_count; @@ -125,20 +124,20 @@ MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, - const int* result_count, int count) { +void mmdeploy_text_detector_release_result(mm_text_detect_t* results, const int* result_count, + int count) { delete[] results; delete[] result_count; } -MM_SDK_API void mmdeploy_text_detector_destroy(mm_handle_t handle) { +void mmdeploy_text_detector_destroy(mm_handle_t handle) { if (handle != nullptr) { auto text_detector = static_cast(handle); delete text_detector; diff --git a/csrc/apis/c/text_detector.h b/csrc/apis/c/text_detector.h index 06cb78558..0ca39b900 100644 --- a/csrc/apis/c/text_detector.h +++ b/csrc/apis/c/text_detector.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_text_detect_t { mm_pointf_t bbox[4]; ///< a text bounding box of which the vertex are in clock-wise float score; @@ -25,8 +29,8 @@ typedef struct mm_text_detect_t { * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ -MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create text-detector's handle @@ -37,9 +41,9 @@ MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* devic * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ -MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_detector_create_by_path(const char* model_path, + const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Apply text-detector to batch images and get their inference 
results @@ -52,22 +56,27 @@ MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, * results of each image. It must be released by \ref mmdeploy_detector_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_text_detect_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, + int mat_count, mm_text_detect_t** results, + int** result_count); /** @brief Release the inference result buffer returned by \ref mmdeploy_text_detector_apply * @param[in] results text detection result buffer * @param[in] result_count \p results size buffer * @param[in] count the length of buffer \p result_count */ -MM_SDK_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, - const int* result_count, int count); +MMDEPLOY_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, + const int* result_count, int count); /** * @brief Destroy text-detector's handle * @param[in] handle text-detector's handle created by \ref mmdeploy_text_detector_create_by_path or * \ref mmdeploy_text_detector_create */ -MM_SDK_API void mmdeploy_text_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_detector_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_TEXT_DETECTOR_H diff --git a/csrc/apis/c/text_recognizer.cpp b/csrc/apis/c/text_recognizer.cpp index 1ecf29e22..9458712b5 100644 --- a/csrc/apis/c/text_recognizer.cpp +++ b/csrc/apis/c/text_recognizer.cpp @@ -74,9 +74,9 @@ int mmdeploy_text_recognizer_create_impl(ModelType &&m, const char *device_name, return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -178,9 +178,9 @@ int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t *imag return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/text_recognizer.h b/csrc/apis/c/text_recognizer.h index d5bbd5e1c..e257da583 100644 --- a/csrc/apis/c/text_recognizer.h +++ b/csrc/apis/c/text_recognizer.h @@ -11,6 +11,10 @@ #include "common.h" #include "text_detector.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_text_recognize_t { char* text; float* score; @@ -27,8 +31,8 @@ typedef struct mm_text_recognize_t { * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create a text recognizer instance @@ -39,9 +43,9 @@ MM_SDK_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* dev * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, + const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Apply text recognizer to a batch of text images @@ -53,8 +57,8 @@ MM_SDK_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, * by \ref mmdeploy_text_recognizer_release_result * @return status code of the 
operation */ -MM_SDK_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_text_recognize_t** results); +MMDEPLOY_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, + int count, mm_text_recognize_t** results); /** * @brief Apply text recognizer to a batch of images supplied with text bboxes @@ -68,23 +72,28 @@ MM_SDK_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t * bboxes, must be release by \ref mmdeploy_text_recognizer_release_result * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* images, - int image_count, const mm_text_detect_t* bboxes, - const int* bbox_count, - mm_text_recognize_t** results); +MMDEPLOY_API int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* images, + int image_count, + const mm_text_detect_t* bboxes, + const int* bbox_count, + mm_text_recognize_t** results); /** @brief Release result buffer returned by \ref mmdeploy_text_recognizer_apply or \ref * mmdeploy_text_recognizer_apply_bbox * @param[in] results result buffer by text recognizer * @param[in] count length of \p result */ -MM_SDK_API void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count); +MMDEPLOY_API void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count); /** * @brief destroy text recognizer * @param[in] handle handle of text recognizer created by \ref * mmdeploy_text_recognizer_create_by_path or \ref mmdeploy_text_recognizer_create */ -MM_SDK_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_TEXT_RECOGNIZER_H_ diff --git a/csrc/apis/python/CMakeLists.txt b/csrc/apis/python/CMakeLists.txt index 1d98d84d4..0730268f0 100644 --- a/csrc/apis/python/CMakeLists.txt +++ 
b/csrc/apis/python/CMakeLists.txt @@ -23,11 +23,10 @@ mmdeploy_python_add_module(restorer) pybind11_add_module(${PROJECT_NAME} ${MMDEPLOY_PYTHON_SRCS}) -target_link_libraries(${PROJECT_NAME} PRIVATE - ${MMDEPLOY_LIBS} - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive - -Wl,--no-as-needed ${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-need) +mmdeploy_load_static(${PROJECT_NAME} MMDeployStaticModules) +mmdeploy_load_dynamic(${PROJECT_NAME} MMDeployDynamicModules) +target_link_libraries(${PROJECT_NAME} PRIVATE MMDeployLibs) target_include_directories(${PROJECT_NAME} PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../..) + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/../..) diff --git a/csrc/archive/CMakeLists.txt b/csrc/archive/CMakeLists.txt index 2b34ab1bb..264597415 100644 --- a/csrc/archive/CMakeLists.txt +++ b/csrc/archive/CMakeLists.txt @@ -6,7 +6,6 @@ add_library(${PROJECT_NAME} INTERFACE) target_link_libraries(${PROJECT_NAME} INTERFACE mmdeploy::core) add_library(mmdeploy::archive ALIAS mmdeploy_archive) -export_target(${PROJECT_NAME}) install(DIRECTORY ${CMAKE_SOURCE_DIR}/src/archive DESTINATION include/cpp FILES_MATCHING PATTERN "*.h") diff --git a/csrc/archive/json_archive.h b/csrc/archive/json_archive.h index 31666d89b..6f137b9a7 100644 --- a/csrc/archive/json_archive.h +++ b/csrc/archive/json_archive.h @@ -133,7 +133,7 @@ inline Value json_to_value(const nlohmann::json& json) { return value; } default: - ERROR("unsupported json type: {}", json.type_name()); + MMDEPLOY_ERROR("unsupported json type: {}", json.type_name()); return {}; } } diff --git a/csrc/archive/value_archive.h b/csrc/archive/value_archive.h index 8500cbc42..f4115b3cb 100644 --- a/csrc/archive/value_archive.h +++ b/csrc/archive/value_archive.h @@ -114,6 +114,8 @@ inline T from_value(const Value& value) { return x; } +namespace detail { + inline void load(ValueInputArchive& archive, Value& v) { archive.native(v); } template , Value>::value, bool> 
= true> @@ -121,6 +123,8 @@ inline void save(ValueOutputArchive& archive, T&& v) { archive.native(std::forward(v)); } +} // namespace detail + } // namespace mmdeploy #endif // MMDEPLOY_SRC_ARCHIVE_VALUE_ARCHIVE_H_ diff --git a/csrc/backend_ops/CMakeLists.txt b/csrc/backend_ops/CMakeLists.txt index 91117d2db..a9eac86ae 100644 --- a/csrc/backend_ops/CMakeLists.txt +++ b/csrc/backend_ops/CMakeLists.txt @@ -1,26 +1,28 @@ -set(CMAKE_CXX_STANDARD 14) +if (NOT MSVC) + set(CMAKE_CXX_STANDARD 14) +endif () set(CMAKE_CXX_FLAGS_RELEASE "-O3") # build ONNXRUNTIME ops if ("ort" IN_LIST MMDEPLOY_TARGET_BACKENDS) - if (NOT DEFINED ONNXRUNTIME_DIR) - set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) - endif () - if (NOT ONNXRUNTIME_DIR) - message(FATAL_ERROR " ONNXRUNTIME_DIR is not found.") - else () - message(STATUS "Build ONNXRUNTIME custom ops.") - add_subdirectory(onnxruntime) - endif () + if (NOT DEFINED ONNXRUNTIME_DIR) + set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) + endif () + if (NOT ONNXRUNTIME_DIR) + message(FATAL_ERROR " ONNXRUNTIME_DIR is not found.") + else () + message(STATUS "Build ONNXRUNTIME custom ops.") + add_subdirectory(onnxruntime) + endif () endif () # build TensorRT ops if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS) - if (NOT DEFINED TENSORRT_DIR) - set(TENSORRT_DIR $ENV{TENSORRT_DIR}) - endif () - message(STATUS "Build TensorRT custom ops.") - add_subdirectory(tensorrt) + if (NOT DEFINED TENSORRT_DIR) + set(TENSORRT_DIR $ENV{TENSORRT_DIR}) + endif () + message(STATUS "Build TensorRT custom ops.") + add_subdirectory(tensorrt) endif () # build NCNN ops diff --git a/csrc/backend_ops/ncnn/CMakeLists.txt b/csrc/backend_ops/ncnn/CMakeLists.txt index 6345448e6..9580d3b96 100755 --- a/csrc/backend_ops/ncnn/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/CMakeLists.txt @@ -4,21 +4,20 @@ cmake_minimum_required(VERSION 3.14) # ncnn find_package(ncnn) -if(ncnn_FOUND) - message(STATUS "ncnn library found!") -else() - message(FATAL_ERROR "Could not locate ncnn") -endif() +if 
(ncnn_FOUND) + message(STATUS "ncnn library found!") +else () + message(FATAL_ERROR "Could not locate ncnn") +endif () -set_targets(mmdeploy_ncnn_ops NCNN_OPS_OBJ NCNN_OPS_STATIC NCNN_OPS_SHARED) -if(NOT ANDROID AND NOT IOS) - add_subdirectory(ops) - add_subdirectory(onnx2ncnn) - add_subdirectory(pyncnn_ext) -else() - # In case of embedded platform, like android, or ios, we only build custom ncnn - # ops, and leave the executable converter(onnx2ncnn, pyncnn_ext) built under - # the host platforms - add_subdirectory(ops) -endif() +if (NOT ANDROID AND NOT IOS) + add_subdirectory(ops) + add_subdirectory(onnx2ncnn) + add_subdirectory(pyncnn_ext) +else () + # In case of embedded platform, like android, or ios, we only build custom ncnn + # ops, and leave the executable converter(onnx2ncnn, pyncnn_ext) built under + # the host platforms + add_subdirectory(ops) +endif () diff --git a/csrc/backend_ops/ncnn/ops/CMakeLists.txt b/csrc/backend_ops/ncnn/ops/CMakeLists.txt index aa8972984..4a4334518 100755 --- a/csrc/backend_ops/ncnn/ops/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/ops/CMakeLists.txt @@ -2,18 +2,24 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ncnn_ops) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) # add plugin source file(GLOB_RECURSE NCNN_OPS_SRCS *.cpp) -build_object_target(${NCNN_OPS_OBJ} "${NCNN_OPS_SRCS}") -target_link_libraries(${NCNN_OPS_OBJ} ncnn) +add_library(${PROJECT_NAME}_obj OBJECT "${NCNN_OPS_SRCS}") +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +target_include_directories(${PROJECT_NAME}_obj PUBLIC + $) +set(_COMMON_INCLUDE_DIRS + $ + $) +target_include_directories(${PROJECT_NAME}_obj + PUBLIC ${_COMMON_INCLUDE_DIRS}) +mmdeploy_export(${PROJECT_NAME}_obj) -build_shared_target(${NCNN_OPS_SHARED} ${NCNN_OPS_OBJ} "PRIVATE") -install_targets(${NCNN_OPS_SHARED}) +mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "") 
+target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_obj) +target_include_directories(${PROJECT_NAME} + PUBLIC ${_COMMON_INCLUDE_DIRS}) -if (MMDEPLOY_BUILD_SDK) - ## Build static library. SDK's uses it to build `ncnn_net` module - build_static_target(${NCNN_OPS_STATIC} ${NCNN_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::ncnn_ops::static ALIAS ${NCNN_OPS_STATIC}) -endif () +add_library(mmdeploy::ncnn_ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h index 2fb07d8b0..b265f6d4a 100755 --- a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h +++ b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h @@ -5,13 +5,14 @@ #include #include +#include "core/macro.h" #include "net.h" extern "C" { -std::map& get_mmdeploy_layer_creator(); -std::map& get_mmdeploy_layer_destroyer(); +MMDEPLOY_API std::map& get_mmdeploy_layer_creator(); +MMDEPLOY_API std::map& get_mmdeploy_layer_destroyer(); -int register_mmdeploy_custom_layers(ncnn::Net& net); +MMDEPLOY_API int register_mmdeploy_custom_layers(ncnn::Net& net); } #endif diff --git a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt index f0d414896..b60c91006 100755 --- a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt @@ -6,9 +6,10 @@ project(ncnn_ext) if (NOT TARGET pybind11) add_subdirectory(${CMAKE_SOURCE_DIR}/third_party/pybind11 pybind11) endif () -include_directories(${pybind11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS}) + pybind11_add_module(ncnn_ext ncnn_ext.cpp) -target_link_libraries(ncnn_ext PUBLIC ncnn ${NCNN_OPS_SHARED}) + +target_link_libraries(ncnn_ext PUBLIC mmdeploy_ncnn_ops ncnn) set_target_properties( ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) diff --git a/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp b/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp index e9ff04eb4..ac158b9ed 100755 --- 
a/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp +++ b/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. #include -#include "../ops/ncnn_ops_register.h" +#include "ncnn_ops_register.h" #include "net.h" PYBIND11_MODULE(ncnn_ext, m) { diff --git a/csrc/backend_ops/onnxruntime/CMakeLists.txt b/csrc/backend_ops/onnxruntime/CMakeLists.txt index f646bbc98..613a60881 100644 --- a/csrc/backend_ops/onnxruntime/CMakeLists.txt +++ b/csrc/backend_ops/onnxruntime/CMakeLists.txt @@ -2,26 +2,23 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_onnxruntime_ops) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -set_targets(${PROJECT_NAME} ORT_OPS_OBJ ORT_OPS_STATIC ORT_OPS_MODULE) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) # add plugin source file(GLOB_RECURSE ORT_OPS_SRCS *.cpp) -build_object_target(${ORT_OPS_OBJ} "${ORT_OPS_SRCS}") -target_include_directories(${ORT_OPS_OBJ} PUBLIC +add_library(${PROJECT_NAME}_obj OBJECT "${ORT_OPS_SRCS}") +target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DMMDEPLOY_API_EXPORTS=1) +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +mmdeploy_export(${PROJECT_NAME}_obj) + +target_include_directories(${PROJECT_NAME}_obj PUBLIC $ - $) -target_link_directories(${ORT_OPS_OBJ} PUBLIC + $ + $) +target_link_directories(${PROJECT_NAME}_obj PUBLIC ${ONNXRUNTIME_DIR}/lib) -target_link_libraries(${ORT_OPS_OBJ} PUBLIC onnxruntime) - -add_library(${ORT_OPS_MODULE} MODULE $) -target_link_libraries(${ORT_OPS_MODULE} PRIVATE ${ORT_OPS_OBJ}) -add_library(mmdeploy::onnxruntime::ops ALIAS ${ORT_OPS_MODULE}) -install_targets(${ORT_OPS_MODULE}) +target_link_libraries(${PROJECT_NAME}_obj PUBLIC onnxruntime) -if (MMDEPLOY_BUILD_SDK) - ## Build static library. 
SDK's uses it to build `ort_net` module - build_static_target(${ORT_OPS_STATIC} ${ORT_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::onnxruntime::ops::static ALIAS ${ORT_OPS_STATIC}) -endif () +mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "") +target_link_libraries(${PROJECT_NAME} PUBLIC ${PROJECT_NAME}_obj) +add_library(mmdeploy::onnxruntime::ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h b/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h index 84318bc81..344031e79 100644 --- a/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h +++ b/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h @@ -3,11 +3,14 @@ #define ONNXRUNTIME_REGISTER_H #include +#include "core/macro.h" + #ifdef __cplusplus extern "C" { #endif -OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, const OrtApiBase *api); +MMDEPLOY_API OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, + const OrtApiBase *api); #ifdef __cplusplus } diff --git a/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp b/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp index 9f2ce2cc0..f7b9cedff 100644 --- a/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp +++ b/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp @@ -7,7 +7,6 @@ const char *c_MMDeployOpDomain = "mmdeploy"; OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, const OrtApiBase *api) { const OrtApi *kOrtApi = api->GetApi(ORT_API_VERSION); - OrtStatus *status = nullptr; for (auto &_op_list_pair : mmdeploy::get_mmdeploy_custom_ops()) { OrtCustomOpDomain *domain = nullptr; diff --git a/csrc/backend_ops/tensorrt/CMakeLists.txt b/csrc/backend_ops/tensorrt/CMakeLists.txt index 88a0176df..796a600eb 100644 --- a/csrc/backend_ops/tensorrt/CMakeLists.txt +++ b/csrc/backend_ops/tensorrt/CMakeLists.txt @@ -3,41 +3,35 @@ cmake_minimum_required(VERSION 3.14) include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake NO_POLICY_SCOPE) 
project(mmdeploy_tensorrt_ops CUDA CXX) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake NO_POLICY_SCOPE) include(${CMAKE_SOURCE_DIR}/cmake/tensorrt.cmake NO_POLICY_SCOPE) -set_targets(${PROJECT_NAME} BACKEND_OPS_OBJ BACKEND_OPS_STATIC BACKEND_OPS_MODULE) - # cub if (NOT DEFINED CUB_ROOT_DIR) if (CUDA_VERSION VERSION_LESS 11.0) set(CUB_ROOT_DIR "${CMAKE_SOURCE_DIR}/third_party/cub") - endif() + endif () endif () file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp *.cu) -build_object_target(${BACKEND_OPS_OBJ} "${BACKEND_OPS_SRCS}") -target_compile_definitions(${BACKEND_OPS_OBJ} +add_library(${PROJECT_NAME}_obj OBJECT "${BACKEND_OPS_SRCS}") +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DTHRUST_IGNORE_DEPRECATED_CPP_DIALECT=1) -target_include_directories(${BACKEND_OPS_OBJ} +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common) -target_include_directories(${BACKEND_OPS_OBJ} +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${TENSORRT_INCLUDE_DIR}) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${CUDNN_DIR}/include) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${CUB_ROOT_DIR}) -target_link_directories(${BACKEND_OPS_OBJ} PUBLIC ${CUDNN_DIR}/lib64) -target_link_libraries(${BACKEND_OPS_OBJ} - PRIVATE ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${TENSORRT_INCLUDE_DIR}) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDNN_DIR}/include) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUB_ROOT_DIR}) +target_link_directories(${PROJECT_NAME}_obj PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64) +target_link_libraries(${PROJECT_NAME}_obj + PUBLIC ${TENSORRT_LIBRARY} 
${CUDA_CUBLAS_LIBRARIES} cudnn) +mmdeploy_export(${PROJECT_NAME}_obj) # Build module library. It is used to convert onnx model to tensorrt engine -build_module_target(${BACKEND_OPS_MODULE} ${BACKEND_OPS_OBJ} "PRIVATE") -add_library(mmdeploy::tensorrt_ops ALIAS ${BACKEND_OPS_MODULE}) -install_targets(${BACKEND_OPS_MODULE}) - -if (MMDEPLOY_BUILD_SDK) - ## Build static library. SDK's uses it to build `trt_net` module - build_static_target(${BACKEND_OPS_STATIC} ${BACKEND_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::tensorrt_ops::static ALIAS ${BACKEND_OPS_STATIC}) -endif () +mmdeploy_add_module(${PROJECT_NAME} MODULE EXCLUDE "") +target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_obj) +add_library(mmdeploy::tensorrt_ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/codebase/CMakeLists.txt b/csrc/codebase/CMakeLists.txt index 023be7668..9ef6490a8 100644 --- a/csrc/codebase/CMakeLists.txt +++ b/csrc/codebase/CMakeLists.txt @@ -9,8 +9,8 @@ if ("all" IN_LIST MMDEPLOY_CODEBASES) list(APPEND CODEBASES "mmseg") list(APPEND CODEBASES "mmocr") list(APPEND CODEBASES "mmedit") -else() - set (CODEBASES ${MMDEPLOY_CODEBASES}) +else () + set(CODEBASES ${MMDEPLOY_CODEBASES}) endif () foreach (codebase IN LISTS CODEBASES) diff --git a/csrc/codebase/common.h b/csrc/codebase/common.h index b0b164ddb..c815aa11f 100644 --- a/csrc/codebase/common.h +++ b/csrc/codebase/common.h @@ -14,7 +14,7 @@ namespace mmdeploy { class Context { public: explicit Context(const Value& config) { - DEBUG("config: {}", config); + MMDEPLOY_DEBUG("config: {}", config); device_ = config["context"]["device"].get(); stream_ = config["context"]["stream"].get(); } @@ -35,17 +35,17 @@ class CodebaseCreator : public Creator { std::unique_ptr Create(const Value& cfg) override { constexpr auto key{"component"}; if (!cfg.contains(key)) { - ERROR("no key '{}' in config {}", key, cfg); + MMDEPLOY_ERROR("no key '{}' in config {}", key, cfg); throw_exception(eInvalidArgument); } if (!cfg[key].is_string()) { - ERROR("key 
'{}' is not a string", key); + MMDEPLOY_ERROR("key '{}' is not a string", key); throw_exception(eInvalidArgument); } auto postprocess_type = cfg[key].get(); auto creator = Registry::Get().GetCreator(postprocess_type); if (creator == nullptr) { - ERROR("could not found entry '{}' in {}", postprocess_type, Tag::name); + MMDEPLOY_ERROR("could not found entry '{}' in {}", postprocess_type, Tag::name); throw_exception(eEntryNotFound); } return creator->Create(cfg); diff --git a/csrc/codebase/mmcls/CMakeLists.txt b/csrc/codebase/mmcls/CMakeLists.txt index c2b254149..259b653cd 100644 --- a/csrc/codebase/mmcls/CMakeLists.txt +++ b/csrc/codebase/mmcls/CMakeLists.txt @@ -2,10 +2,8 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmcls) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") add_library(mmdeploy::mmcls ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmcls/linear_cls.cpp b/csrc/codebase/mmcls/linear_cls.cpp index 07704cd08..8b14f4e92 100644 --- a/csrc/codebase/mmcls/linear_cls.cpp +++ b/csrc/codebase/mmcls/linear_cls.cpp @@ -1,5 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
+#include #include #include "codebase/mmcls/mmcls.h" @@ -18,19 +19,19 @@ class LinearClsHead : public MMClassification { if (cfg.contains("params")) { topk_ = cfg["params"].value("topk", 1); if (topk_ <= 0) { - ERROR("'topk' should be greater than 0, but got '{}'", topk_); + MMDEPLOY_ERROR("'topk' should be greater than 0, but got '{}'", topk_); throw_exception(eInvalidArgument); } } } Result operator()(const Value& infer_res) { - DEBUG("infer_res: {}", infer_res); + MMDEPLOY_DEBUG("infer_res: {}", infer_res); auto output = infer_res["output"].get(); if (!(output.shape().size() >= 2 && output.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", output.shape(), - (int)output.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", output.shape(), + (int)output.data_type()); return Status(eNotSupported); } @@ -53,7 +54,7 @@ class LinearClsHead : public MMClassification { [&](int i, int j) { return scores_data[i] > scores_data[j]; }); for (int i = 0; i < topk_; ++i) { auto label = ClassifyOutput::Label{idx[i], scores_data[idx[i]]}; - DEBUG("label_id: {}, score: {}", label.label_id, label.score); + MMDEPLOY_DEBUG("label_id: {}, score: {}", label.label_id, label.score); output.labels.push_back(label); } return to_value(std::move(output)); diff --git a/csrc/codebase/mmcls/mmcls.cpp b/csrc/codebase/mmcls/mmcls.cpp index cd5990724..973a4c6d8 100644 --- a/csrc/codebase/mmcls/mmcls.cpp +++ b/csrc/codebase/mmcls/mmcls.cpp @@ -2,8 +2,12 @@ #include "codebase/mmcls/mmcls.h" -namespace mmdeploy::mmcls { +namespace mmdeploy { +namespace mmcls { REGISTER_CODEBASE(MMClassification); -} // namespace mmdeploy::mmcls +} + +MMDEPLOY_DEFINE_REGISTRY(mmcls::MMClassification); +} // namespace mmdeploy diff --git a/csrc/codebase/mmcls/mmcls.h b/csrc/codebase/mmcls/mmcls.h index 5cddfd197..2b87b2d53 100644 --- a/csrc/codebase/mmcls/mmcls.h +++ b/csrc/codebase/mmcls/mmcls.h @@ -8,7 +8,8 @@ #include "core/module.h" 
#include "core/serialization.h" -namespace mmdeploy::mmcls { +namespace mmdeploy { +namespace mmcls { struct ClassifyOutput { struct Label { @@ -21,7 +22,9 @@ struct ClassifyOutput { }; DECLARE_CODEBASE(MMClassification, mmcls); +} // namespace mmcls -} // namespace mmdeploy::mmcls +MMDEPLOY_DECLARE_REGISTRY(mmcls::MMClassification); +} // namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMCLS_MMCLS_H_ diff --git a/csrc/codebase/mmdet/CMakeLists.txt b/csrc/codebase/mmdet/CMakeLists.txt index 31a00813b..75ae6c4d4 100644 --- a/csrc/codebase/mmdet/CMakeLists.txt +++ b/csrc/codebase/mmdet/CMakeLists.txt @@ -3,10 +3,11 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmdet) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy_opencv_utils) + add_library(mmdeploy::mmdet ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmdet/instance_segmentation.cpp b/csrc/codebase/mmdet/instance_segmentation.cpp index 481e0b1e2..638ce0b80 100644 --- a/csrc/codebase/mmdet/instance_segmentation.cpp +++ b/csrc/codebase/mmdet/instance_segmentation.cpp @@ -5,7 +5,7 @@ #include "experimental/module_adapter.h" #include "object_detection.h" #include "opencv2/imgproc/imgproc.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy::mmdet { @@ -19,35 +19,35 @@ class ResizeInstanceMask : public ResizeBBox { // TODO: remove duplication Result operator()(const Value& prep_res, const Value& infer_res) { - DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); + MMDEPLOY_DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); try { auto dets = 
infer_res["dets"].get(); auto labels = infer_res["labels"].get(); auto masks = infer_res["masks"].get(); - DEBUG("dets.shape: {}", dets.shape()); - DEBUG("labels.shape: {}", labels.shape()); - DEBUG("masks.shape: {}", masks.shape()); + MMDEPLOY_DEBUG("dets.shape: {}", dets.shape()); + MMDEPLOY_DEBUG("labels.shape: {}", labels.shape()); + MMDEPLOY_DEBUG("masks.shape: {}", masks.shape()); // `dets` is supposed to have 3 dims. They are 'batch', 'bboxes_number' // and 'channels' respectively if (!(dets.shape().size() == 3 && dets.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), - (int)dets.data_type()); + MMDEPLOY_ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), + (int)dets.data_type()); return Status(eNotSupported); } // `labels` is supposed to have 2 dims, which are 'batch' and // 'bboxes_number' if (labels.shape().size() != 2) { - ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), - (int)labels.data_type()); + MMDEPLOY_ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), + (int)labels.data_type()); return Status(eNotSupported); } if (!(masks.shape().size() == 4 && masks.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `mask` tensor, shape: {}, dtype: {}", masks.shape(), - (int)masks.data_type()); + MMDEPLOY_ERROR("unsupported `mask` tensor, shape: {}, dtype: {}", masks.shape(), + (int)masks.data_type()); return Status(eNotSupported); } @@ -65,7 +65,7 @@ class ResizeInstanceMask : public ResizeBBox { return to_value(result); } catch (const std::exception& e) { - ERROR("{}", e.what()); + MMDEPLOY_ERROR("{}", e.what()); return Status(eFail); } } @@ -74,10 +74,10 @@ class ResizeInstanceMask : public ResizeBBox { void ProcessMasks(DetectorOutput& result, Tensor cpu_masks, int img_w, int img_h) const { auto shape = TensorShape{cpu_masks.shape(1), cpu_masks.shape(2), cpu_masks.shape(3)}; cpu_masks.Reshape(shape); - + MMDEPLOY_DEBUG("{}, 
{}", cpu_masks.shape(), cpu_masks.data_type()); for (auto& det : result.detections) { auto mask = cpu_masks.Slice(det.index); - cv::Mat mask_mat(mask.shape(1), mask.shape(2), CV_32F, mask.data()); + cv::Mat mask_mat((int)mask.shape(1), (int)mask.shape(2), CV_32F, mask.data()); cv::Mat warped_mask; auto& bbox = det.bbox; // same as mmdet with skip_empty = True @@ -97,7 +97,9 @@ class ResizeInstanceMask : public ResizeBBox { cv::warpAffine(mask_mat, warped_mask, m, cv::Size{width, height}, cv::INTER_LINEAR | cv::WARP_INVERSE_MAP); warped_mask = warped_mask > mask_thr_binary_; - det.mask = cpu::CVMat2Mat(warped_mask, PixelFormat::kGRAYSCALE); + + det.mask = Mat(height, width, PixelFormat::kGRAYSCALE, DataType::kINT8, + std::shared_ptr(warped_mask.data, [mat = warped_mask](void*) {})); } } diff --git a/csrc/codebase/mmdet/mmdet.cpp b/csrc/codebase/mmdet/mmdet.cpp index 218b73c50..45fe21f9b 100644 --- a/csrc/codebase/mmdet/mmdet.cpp +++ b/csrc/codebase/mmdet/mmdet.cpp @@ -2,8 +2,12 @@ #include "codebase/mmdet/mmdet.h" -namespace mmdeploy::mmdet { +namespace mmdeploy { +namespace mmdet { REGISTER_CODEBASE(MMDetection); -} // namespace mmdeploy::mmdet +} + +MMDEPLOY_DEFINE_REGISTRY(mmdet::MMDetection); +} // namespace mmdeploy diff --git a/csrc/codebase/mmdet/mmdet.h b/csrc/codebase/mmdet/mmdet.h index a66300403..dcb1e1819 100644 --- a/csrc/codebase/mmdet/mmdet.h +++ b/csrc/codebase/mmdet/mmdet.h @@ -3,13 +3,17 @@ #ifndef MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ #define MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ +#include + #include "codebase/common.h" #include "core/device.h" #include "core/mat.h" #include "core/module.h" +#include "core/registry.h" #include "core/serialization.h" -namespace mmdeploy::mmdet { +namespace mmdeploy { +namespace mmdet { struct DetectorOutput { struct Detection { @@ -25,7 +29,9 @@ struct DetectorOutput { }; DECLARE_CODEBASE(MMDetection, mmdet); +} // namespace mmdet -} // namespace mmdeploy::mmdet +MMDEPLOY_DECLARE_REGISTRY(mmdet::MMDetection); +} // 
namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ diff --git a/csrc/codebase/mmdet/object_detection.cpp b/csrc/codebase/mmdet/object_detection.cpp index 6a7c6d6a3..62a5c2101 100644 --- a/csrc/codebase/mmdet/object_detection.cpp +++ b/csrc/codebase/mmdet/object_detection.cpp @@ -17,26 +17,27 @@ ResizeBBox::ResizeBBox(const Value& cfg) : MMDetection(cfg) { } } Result ResizeBBox::operator()(const Value& prep_res, const Value& infer_res) { - DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); + MMDEPLOY_DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); try { auto dets = infer_res["dets"].get(); auto labels = infer_res["labels"].get(); - DEBUG("dets.shape: {}", dets.shape()); - DEBUG("labels.shape: {}", labels.shape()); + MMDEPLOY_DEBUG("dets.shape: {}", dets.shape()); + MMDEPLOY_DEBUG("labels.shape: {}", labels.shape()); // `dets` is supposed to have 3 dims. They are 'batch', 'bboxes_number' // and 'channels' respectively if (!(dets.shape().size() == 3 && dets.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), (int)dets.data_type()); + MMDEPLOY_ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), + (int)dets.data_type()); return Status(eNotSupported); } // `labels` is supposed to have 2 dims, which are 'batch' and // 'bboxes_number' if (labels.shape().size() != 2) { - ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), - (int)labels.data_type()); + MMDEPLOY_ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), + (int)labels.data_type()); return Status(eNotSupported); } @@ -98,16 +99,17 @@ Result ResizeBBox::GetBBoxes(const Value& prep_res, const Tensor auto right = dets_ptr[2]; auto bottom = dets_ptr[3]; - DEBUG("ori left {}, top {}, right {}, bottom {}, label {}", left, top, right, bottom, - *labels_ptr); + MMDEPLOY_DEBUG("ori left {}, top {}, right {}, bottom {}, label {}", left, top, right, bottom, + *labels_ptr); 
auto rect = MapToOriginImage(left, top, right, bottom, scale_factor.data(), w_offset, h_offset, ori_width, ori_height); if (rect[2] - rect[0] < min_bbox_size_ || rect[3] - rect[1] < min_bbox_size_) { - DEBUG("ignore small bbox with width '{}' and height '{}", rect[2] - rect[0], - rect[3] - rect[1]); + MMDEPLOY_DEBUG("ignore small bbox with width '{}' and height '{}", rect[2] - rect[0], + rect[3] - rect[1]); continue; } - DEBUG("remap left {}, top {}, right {}, bottom {}", rect[0], rect[1], rect[2], rect[3]); + MMDEPLOY_DEBUG("remap left {}, top {}, right {}, bottom {}", rect[0], rect[1], rect[2], + rect[3]); DetectorOutput::Detection det{}; det.index = i; det.label_id = static_cast(*labels_ptr); diff --git a/csrc/codebase/mmedit/CMakeLists.txt b/csrc/codebase/mmedit/CMakeLists.txt index a54664255..59646d0f6 100644 --- a/csrc/codebase/mmedit/CMakeLists.txt +++ b/csrc/codebase/mmedit/CMakeLists.txt @@ -3,10 +3,9 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmedit) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE opencv_core) add_library(mmdeploy::mmedit ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmedit/mmedit.cpp b/csrc/codebase/mmedit/mmedit.cpp index 28a8dfa1d..b10c084b2 100644 --- a/csrc/codebase/mmedit/mmedit.cpp +++ b/csrc/codebase/mmedit/mmedit.cpp @@ -4,8 +4,12 @@ #include "core/registry.h" -namespace mmdeploy::mmedit { +namespace mmdeploy { +namespace mmedit { REGISTER_CODEBASE(MMEdit); -} // namespace mmdeploy::mmedit +} // namespace mmedit + +MMDEPLOY_DEFINE_REGISTRY(mmedit::MMEdit); +} // namespace mmdeploy diff --git 
a/csrc/codebase/mmedit/mmedit.h b/csrc/codebase/mmedit/mmedit.h index ed9c3cdc8..e7c428513 100644 --- a/csrc/codebase/mmedit/mmedit.h +++ b/csrc/codebase/mmedit/mmedit.h @@ -9,12 +9,16 @@ #include "core/module.h" #include "core/serialization.h" -namespace mmdeploy::mmedit { +namespace mmdeploy { +namespace mmedit { using RestorerOutput = Mat; DECLARE_CODEBASE(MMEdit, mmedit); -} // namespace mmdeploy::mmedit +} // namespace mmedit + +MMDEPLOY_DECLARE_REGISTRY(mmedit::MMEdit); +} // namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMEDIT_MMEDIT_H_ diff --git a/csrc/codebase/mmedit/restorer.cpp b/csrc/codebase/mmedit/restorer.cpp index da06075a4..84626d15b 100644 --- a/csrc/codebase/mmedit/restorer.cpp +++ b/csrc/codebase/mmedit/restorer.cpp @@ -32,8 +32,8 @@ class TensorToImg : public MMEdit { mat_hwc.convertTo(rescale_uint8, CV_8UC(channels), 255.f); return mat; } else { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", upscale.shape(), - (int)upscale.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", upscale.shape(), + (int)upscale.data_type()); return Status(eNotSupported); } } diff --git a/csrc/codebase/mmocr/CMakeLists.txt b/csrc/codebase/mmocr/CMakeLists.txt index 42e63a0df..60ac5c6ff 100644 --- a/csrc/codebase/mmocr/CMakeLists.txt +++ b/csrc/codebase/mmocr/CMakeLists.txt @@ -3,13 +3,13 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmocr) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} OCR_SRCS) aux_source_directory(${CMAKE_SOURCE_DIR}/third_party/clipper CLIPPER_SRCS) set(SRCS ${OCR_SRCS} ${CLIPPER_SRCS}) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_SOURCE_DIR}/third_party/clipper) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core 
opencv_imgproc) +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy::transform mmdeploy_opencv_utils) add_library(mmdeploy::mmocr ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmocr/crnn.cpp b/csrc/codebase/mmocr/crnn.cpp index bd6c4a617..fc611e858 100644 --- a/csrc/codebase/mmocr/crnn.cpp +++ b/csrc/codebase/mmocr/crnn.cpp @@ -1,5 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. +#include #include #include "core/device.h" @@ -22,7 +23,7 @@ class CTCConvertor : public MMOCR { explicit CTCConvertor(const Value& cfg) : MMOCR(cfg) { auto model = cfg["context"]["model"].get(); if (!cfg.contains("params")) { - ERROR("'params' is required, but it's not in the config"); + MMDEPLOY_ERROR("'params' is required, but it's not in the config"); throw_exception(eInvalidArgument); } // BaseConverter @@ -40,11 +41,11 @@ class CTCConvertor : public MMOCR { } else if (dict_type == "DICT90") { idx2char_ = SplitChars(DICT90); } else { - ERROR("unknown dict_type: {}", dict_type); + MMDEPLOY_ERROR("unknown dict_type: {}", dict_type); throw_exception(eInvalidArgument); } } else { - ERROR("either dict_file, dict_list or dict_type must be specified"); + MMDEPLOY_ERROR("either dict_file, dict_list or dict_type must be specified"); throw_exception(eInvalidArgument); } // CTCConverter @@ -62,8 +63,8 @@ class CTCConvertor : public MMOCR { auto d_conf = _prob["output"].get(); if (!(d_conf.shape().size() == 3 && d_conf.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", d_conf.shape(), - (int)d_conf.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", d_conf.shape(), + (int)d_conf.data_type()); return Status(eNotSupported); } @@ -80,7 +81,7 @@ class CTCConvertor : public MMOCR { auto [indexes, scores] = Tensor2Idx(data, w, c, valid_ratio); auto text = Idx2Str(indexes); - DEBUG("text: {}", text); + MMDEPLOY_DEBUG("text: {}", text); TextRecognizerOutput output{text, 
scores}; diff --git a/csrc/codebase/mmocr/dbnet.cpp b/csrc/codebase/mmocr/dbnet.cpp index 93a3d0400..bd90dca3f 100644 --- a/csrc/codebase/mmocr/dbnet.cpp +++ b/csrc/codebase/mmocr/dbnet.cpp @@ -13,7 +13,7 @@ #include "core/value.h" #include "experimental/module_adapter.h" #include "mmocr.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy::mmocr { @@ -37,21 +37,21 @@ class DBHead : public MMOCR { } Result operator()(const Value& _data, const Value& _prob) { - DEBUG("preprocess_result: {}", _data); - DEBUG("inference_result: {}", _prob); + MMDEPLOY_DEBUG("preprocess_result: {}", _data); + MMDEPLOY_DEBUG("inference_result: {}", _prob); auto img = _data["img"].get(); - DEBUG("img shape: {}", img.shape()); + MMDEPLOY_DEBUG("img shape: {}", img.shape()); Device cpu_device{"cpu"}; OUTCOME_TRY(auto conf, MakeAvailableOnDevice(_prob["output"].get(), cpu_device, stream_)); OUTCOME_TRY(stream_.Wait()); - DEBUG("shape: {}", conf.shape()); + MMDEPLOY_DEBUG("shape: {}", conf.shape()); if (!(conf.shape().size() == 4 && conf.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", conf.shape(), - (int)conf.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", conf.shape(), + (int)conf.data_type()); return Status(eNotSupported); } @@ -103,7 +103,7 @@ class DBHead : public MMOCR { } else { assert(0); } - DEBUG("score: {}", score); + MMDEPLOY_DEBUG("score: {}", score); // cv::drawContours(score_map, vector>{approx}, -1, 1); vector scaled(begin(approx), end(approx)); @@ -133,7 +133,7 @@ class DBHead : public MMOCR { cv::Mat mask(rect.size(), CV_8U, cv::Scalar(0)); - cv::fillPoly(mask, std::vector{box}, 1, cv::LINE_8, 0, -rect.tl()); + cv::fillPoly(mask, std::vector>{box}, 1, cv::LINE_8, 0, -rect.tl()); auto mean = cv::mean(bitmap(rect), mask)[0]; return static_cast(mean); } diff --git a/csrc/codebase/mmocr/mmocr.cpp b/csrc/codebase/mmocr/mmocr.cpp index 2935f03b5..f34f918af 
100644 --- a/csrc/codebase/mmocr/mmocr.cpp +++ b/csrc/codebase/mmocr/mmocr.cpp @@ -5,8 +5,12 @@ #include "core/registry.h" #include "core/utils/formatter.h" -namespace mmdeploy::mmocr { +namespace mmdeploy { +namespace mmocr { REGISTER_CODEBASE(MMOCR); -} // namespace mmdeploy::mmocr +} // namespace mmocr + +MMDEPLOY_DEFINE_REGISTRY(mmocr::MMOCR); +} // namespace mmdeploy diff --git a/csrc/codebase/mmocr/mmocr.h b/csrc/codebase/mmocr/mmocr.h index 42098af74..1871b6755 100644 --- a/csrc/codebase/mmocr/mmocr.h +++ b/csrc/codebase/mmocr/mmocr.h @@ -7,7 +7,8 @@ #include "core/device.h" #include "core/module.h" -namespace mmdeploy::mmocr { +namespace mmdeploy { +namespace mmocr { struct TextDetectorOutput { std::vector> boxes; @@ -23,6 +24,9 @@ struct TextRecognizerOutput { DECLARE_CODEBASE(MMOCR, mmocr); -} // namespace mmdeploy::mmocr +} // namespace mmocr + +MMDEPLOY_DECLARE_REGISTRY(mmocr::MMOCR); +} // namespace mmdeploy #endif // MMDEPLOY_MMOCR_H diff --git a/csrc/codebase/mmocr/resize_ocr.cpp b/csrc/codebase/mmocr/resize_ocr.cpp index 2da9bac7a..69d7602d1 100644 --- a/csrc/codebase/mmocr/resize_ocr.cpp +++ b/csrc/codebase/mmocr/resize_ocr.cpp @@ -4,12 +4,14 @@ #include "archive/json_archive.h" #include "archive/value_archive.h" +#include "core/registry.h" #include "core/tensor.h" #include "core/utils/device_utils.h" #include "core/utils/formatter.h" #include "opencv2/imgproc.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/resize.h" +#include "preprocess/transform/transform.h" using namespace std; @@ -37,7 +39,7 @@ class ResizeOCRImpl : public Module { ~ResizeOCRImpl() override = default; Result Process(const Value& input) override { - DEBUG("input: {}", input); + MMDEPLOY_DEBUG("input: {}", input); auto dst_height = height_; auto dst_min_width = min_width_; auto dst_max_width = max_width_; @@ -84,7 +86,7 @@ class ResizeOCRImpl : public Module { output["resize_shape"] = to_value(img_resize.desc().shape); 
output["pad_shape"] = output["resize_shape"]; output["valid_ratio"] = valid_ratio; - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } @@ -95,7 +97,7 @@ class ResizeOCRImpl : public Module { int h = desc.shape[1]; int w = desc.shape[2]; int c = desc.shape[3]; - assert(c == 3 or c == 1); + assert(c == 3 || c == 1); cv::Mat src_mat, dst_mat; if (3 == c) { // rgb src_mat = cv::Mat(h, w, CV_8UC3, const_cast(img.data())); @@ -135,6 +137,8 @@ class ResizeOCRImplCreator : public Creator { ReturnType Create(const Value& args) override { return std::make_unique(args); } }; +MMDEPLOY_DEFINE_REGISTRY(ResizeOCRImpl); + REGISTER_MODULE(ResizeOCRImpl, ResizeOCRImplCreator); class ResizeOCR : public Transform { diff --git a/csrc/codebase/mmocr/warp.cpp b/csrc/codebase/mmocr/warp.cpp index b54e6a789..56566f6d1 100644 --- a/csrc/codebase/mmocr/warp.cpp +++ b/csrc/codebase/mmocr/warp.cpp @@ -8,7 +8,7 @@ #include "core/utils/formatter.h" #include "core/value.h" #include "experimental/module_adapter.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy { diff --git a/csrc/codebase/mmseg/CMakeLists.txt b/csrc/codebase/mmseg/CMakeLists.txt index 55bb7e9d0..89a15bb32 100644 --- a/csrc/codebase/mmseg/CMakeLists.txt +++ b/csrc/codebase/mmseg/CMakeLists.txt @@ -3,10 +3,9 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmseg) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_opencv_utils) add_library(mmdeploy::mmseg ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git 
a/csrc/codebase/mmseg/mmseg.cpp b/csrc/codebase/mmseg/mmseg.cpp index 78e1d490e..6f080b822 100644 --- a/csrc/codebase/mmseg/mmseg.cpp +++ b/csrc/codebase/mmseg/mmseg.cpp @@ -4,8 +4,12 @@ using namespace std; -namespace mmdeploy::mmseg { +namespace mmdeploy { +namespace mmseg { REGISTER_CODEBASE(MMSegmentation); -} // namespace mmdeploy::mmseg +} + +MMDEPLOY_DEFINE_REGISTRY(mmseg::MMSegmentation); +} // namespace mmdeploy diff --git a/csrc/codebase/mmseg/mmseg.h b/csrc/codebase/mmseg/mmseg.h index 3685c35bf..912204783 100644 --- a/csrc/codebase/mmseg/mmseg.h +++ b/csrc/codebase/mmseg/mmseg.h @@ -8,7 +8,8 @@ #include "core/module.h" #include "core/tensor.h" -namespace mmdeploy::mmseg { +namespace mmdeploy { +namespace mmseg { struct SegmentorOutput { Tensor mask; @@ -20,6 +21,9 @@ struct SegmentorOutput { DECLARE_CODEBASE(MMSegmentation, mmseg); -} // namespace mmdeploy::mmseg +} // namespace mmseg + +MMDEPLOY_DECLARE_REGISTRY(mmseg::MMSegmentation); +} // namespace mmdeploy #endif // MMDEPLOY_MMSEG_H diff --git a/csrc/codebase/mmseg/segment.cpp b/csrc/codebase/mmseg/segment.cpp index 48afa9b57..8d5aeef08 100644 --- a/csrc/codebase/mmseg/segment.cpp +++ b/csrc/codebase/mmseg/segment.cpp @@ -4,7 +4,7 @@ #include "core/tensor.h" #include "core/utils/device_utils.h" #include "core/utils/formatter.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" namespace mmdeploy::mmseg { @@ -15,19 +15,19 @@ class ResizeMask : public MMSegmentation { try { classes_ = cfg["params"]["num_classes"].get(); } catch (const std::exception &e) { - ERROR("no ['params']['num_classes'] is specified in cfg: {}", cfg); + MMDEPLOY_ERROR("no ['params']['num_classes'] is specified in cfg: {}", cfg); throw_exception(eInvalidArgument); } } Result operator()(const Value &preprocess_result, const Value &inference_result) { - DEBUG("preprocess: {}\ninference: {}", preprocess_result, inference_result); + MMDEPLOY_DEBUG("preprocess: 
{}\ninference: {}", preprocess_result, inference_result); auto mask = inference_result["output"].get(); - DEBUG("tensor.name: {}, tensor.shape: {}, tensor.data_type: {}", mask.name(), mask.shape(), - mask.data_type()); + MMDEPLOY_DEBUG("tensor.name: {}, tensor.shape: {}, tensor.data_type: {}", mask.name(), + mask.shape(), mask.data_type()); if (!(mask.shape().size() == 4 && mask.shape(0) == 1 && mask.shape(1) == 1)) { - ERROR("unsupported `output` tensor, shape: {}", mask.shape()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}", mask.shape()); return Status(eNotSupported); } @@ -40,16 +40,14 @@ class ResizeMask : public MMSegmentation { OUTCOME_TRY(stream_.Wait()); if (mask.data_type() == DataType::kINT64) { // change kINT64 to 2 INT32 - TensorDesc desc{.device = host_tensor.device(), - .data_type = DataType::kINT32, - .shape = {1, 2, height, width}, - .name = host_tensor.name()}; + TensorDesc desc{ + host_tensor.device(), DataType::kINT32, {1, 2, height, width}, host_tensor.name()}; Tensor _host_tensor(desc, mask.buffer()); return MaskResize(_host_tensor, input_height, input_width); } else if (mask.data_type() == DataType::kINT32) { return MaskResize(host_tensor, input_height, input_width); } else { - ERROR("unsupported `output` tensor, dtype: {}", (int)mask.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, dtype: {}", (int)mask.data_type()); return Status(eNotSupported); } } diff --git a/csrc/core/CMakeLists.txt b/csrc/core/CMakeLists.txt index ef539a05b..19b9a64a9 100644 --- a/csrc/core/CMakeLists.txt +++ b/csrc/core/CMakeLists.txt @@ -11,15 +11,14 @@ reliably on all generators, or if a new generator is added in the future that ca projects using it will be stuck. Even if CONFIGURE_DEPENDS works reliably, there is still a cost to perform the check on every rebuild. 
#]==] -# file(GLOB_RECURSE CORE_SRCS *.cpp) set(SPDLOG_LIB) find_package(spdlog QUIET) if (spdlog_FOUND) message(STATUS "spdlog is found") - set(SPDLOG_LIB $) + set(SPDLOG_LIB spdlog::spdlog) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS device_impl.cpp @@ -32,11 +31,14 @@ set(SRCS operator.cpp status_code.cpp tensor.cpp + registry.cpp utils/device_utils.cpp utils/formatter.cpp utils/stacktrace.cpp) -build_target(${PROJECT_NAME} "${SRCS}") + +mmdeploy_add_library(${PROJECT_NAME} ${SRCS}) target_compile_definitions(${PROJECT_NAME} PUBLIC -DMMDEPLOY_STATUS_USE_SOURCE_LOCATION=1) + target_include_directories(${PROJECT_NAME} PUBLIC $ @@ -45,7 +47,10 @@ target_include_directories(${PROJECT_NAME} $ $ ) -target_link_libraries(${PROJECT_NAME} PUBLIC ${SPDLOG_LIB} stdc++fs) +target_link_libraries(${PROJECT_NAME} PUBLIC ${SPDLOG_LIB}) +if (NOT MSVC) + target_link_libraries(${PROJECT_NAME} PUBLIC stdc++fs) +endif () add_library(mmdeploy::core ALIAS ${PROJECT_NAME}) install(DIRECTORY ${CMAKE_SOURCE_DIR}/csrc/core @@ -57,5 +62,3 @@ install(FILES ${CMAKE_SOURCE_DIR}/third_party/outcome/outcome-experimental.hpp install(DIRECTORY ${CMAKE_SOURCE_DIR}/csrc/experimental DESTINATION include/cpp FILES_MATCHING PATTERN "*.h") - -export_target(${PROJECT_NAME}) diff --git a/csrc/core/device.h b/csrc/core/device.h index 6695ca380..f337ad4dc 100644 --- a/csrc/core/device.h +++ b/csrc/core/device.h @@ -10,6 +10,7 @@ #include #include +#include "core/macro.h" #include "core/status_code.h" namespace mmdeploy { @@ -67,7 +68,7 @@ class Device { constexpr explicit Device(int platform_id, int device_id = 0) : platform_id_(platform_id), device_id_(device_id) {} - explicit Device(const char *platform_name, int device_id = 0); + MMDEPLOY_API explicit Device(const char *platform_name, int device_id = 0); constexpr int device_id() const noexcept { return device_id_; } @@ -100,7 +101,7 @@ class Device { enum class MemcpyKind : int { 
HtoD, DtoH, DtoD }; -class Platform { +class MMDEPLOY_API Platform { public: // throws if not found explicit Platform(const char *platform_name); @@ -133,7 +134,7 @@ Platform GetPlatform(int platform_id); Platform GetPlatform(const char *platform_name); -class Stream { +class MMDEPLOY_API Stream { public: Stream() = default; @@ -187,7 +188,7 @@ T GetNative(Stream &stream, ErrorCode *ec = nullptr) { return reinterpret_cast(stream.GetNative(ec)); } -class Event { +class MMDEPLOY_API Event { public: Event() = default; @@ -226,7 +227,7 @@ T GetNative(Event &event, ErrorCode *ec = nullptr) { return reinterpret_cast(event.GetNative(ec)); } -class Kernel { +class MMDEPLOY_API Kernel { public: Kernel() = default; explicit Kernel(std::shared_ptr impl) : impl_(std::move(impl)) {} @@ -246,7 +247,7 @@ T GetNative(Kernel &kernel, ErrorCode *ec = nullptr) { return reinterpret_cast(kernel.GetNative(ec)); } -class Allocator { +class MMDEPLOY_API Allocator { friend class Access; public: @@ -259,7 +260,7 @@ class Allocator { std::shared_ptr impl_; }; -class Buffer { +class MMDEPLOY_API Buffer { public: Buffer() = default; @@ -304,7 +305,7 @@ T GetNative(const Buffer &buffer, ErrorCode *ec = nullptr) { return reinterpret_cast(buffer.GetNative(ec)); } -class PlatformRegistry { +class MMDEPLOY_API PlatformRegistry { public: using Creator = std::function()>; @@ -332,6 +333,6 @@ class PlatformRegistry { std::vector entries_; }; -PlatformRegistry &gPlatformRegistry(); +MMDEPLOY_API PlatformRegistry &gPlatformRegistry(); } // namespace mmdeploy diff --git a/csrc/core/device_impl.cpp b/csrc/core/device_impl.cpp index 04b772314..32ed9e104 100644 --- a/csrc/core/device_impl.cpp +++ b/csrc/core/device_impl.cpp @@ -144,7 +144,7 @@ Stream::Stream(Device device, uint64_t flags) { r.error().throw_exception(); } } else { - ERROR("{}, {}", device.device_id(), device.platform_id()); + MMDEPLOY_ERROR("{}, {}", device.device_id(), device.platform_id()); throw_exception(eInvalidArgument); } } diff --git 
a/csrc/core/graph.cpp b/csrc/core/graph.cpp index a82463005..524542928 100644 --- a/csrc/core/graph.cpp +++ b/csrc/core/graph.cpp @@ -3,8 +3,10 @@ #include "core/graph.h" #include "archive/value_archive.h" +#include "core/registry.h" -namespace mmdeploy::graph { +namespace mmdeploy { +namespace graph { TaskGraph::Handle* TaskGraph::Add(TaskFunction fn) { function_.push_back(std::move(fn)); @@ -14,7 +16,8 @@ TaskGraph::Handle* TaskGraph::Add(TaskFunction fn) { TaskGraph::~TaskGraph() { for (int i = 0; i < time_.size(); ++i) { - INFO("node {} ({}): {} ms", i, handle_[i]->name(), static_cast(time_[i]) / count_); + MMDEPLOY_INFO("node {} ({}): {} ms", i, handle_[i]->name(), + static_cast(time_[i]) / count_); } } @@ -75,4 +78,8 @@ std::vector> Context::Execute(Span()>> return graph_->Execute(tasks); } -} // namespace mmdeploy::graph +} // namespace graph + +MMDEPLOY_DEFINE_REGISTRY(graph::Node); + +} // namespace mmdeploy diff --git a/csrc/core/graph.h b/csrc/core/graph.h index 37c1aa189..d55afe70f 100644 --- a/csrc/core/graph.h +++ b/csrc/core/graph.h @@ -14,7 +14,9 @@ #include "taskflow/taskflow.hpp" #endif -namespace mmdeploy::graph { +namespace mmdeploy { + +namespace graph { using std::pair; using std::string; @@ -24,7 +26,7 @@ using std::vector; class TaskGraph; class Node; -class Context { +class MMDEPLOY_API Context { public: explicit Context(TaskGraph* graph) : graph_(graph) {} @@ -48,7 +50,7 @@ class Context { TaskGraph* graph_; }; -class TaskGraph { +class MMDEPLOY_API TaskGraph { friend class Context; public: @@ -65,6 +67,10 @@ class TaskGraph { ~TaskGraph(); + TaskGraph() = default; + TaskGraph(const TaskGraph&) = delete; + TaskGraph& operator=(const TaskGraph&) = delete; + Handle* Add(TaskFunction fn); Result Run(Value inputs); @@ -82,7 +88,7 @@ class TaskGraph { int64_t count_{}; }; -class Node { +class MMDEPLOY_API Node { public: virtual ~Node() = default; virtual void Build(TaskGraph& graph) = 0; @@ -96,6 +102,10 @@ class Node { std::vector outputs_; }; 
-} // namespace mmdeploy::graph +} // namespace graph + +MMDEPLOY_DECLARE_REGISTRY(graph::Node); + +} // namespace mmdeploy #endif // MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_IR_H_ diff --git a/csrc/core/logger.cpp b/csrc/core/logger.cpp index b858ce785..a1499cc07 100644 --- a/csrc/core/logger.cpp +++ b/csrc/core/logger.cpp @@ -9,6 +9,9 @@ #include #else #include +#if defined(_MSC_VER) +#include +#endif #endif #endif diff --git a/csrc/core/logger.h b/csrc/core/logger.h index 4f7c2ebb8..ff326c451 100644 --- a/csrc/core/logger.h +++ b/csrc/core/logger.h @@ -5,11 +5,13 @@ #include +#include "core/macro.h" + namespace mmdeploy { -spdlog::logger *GetLogger(); +MMDEPLOY_API spdlog::logger *GetLogger(); -void SetLogger(spdlog::logger *logger); +MMDEPLOY_API void SetLogger(spdlog::logger *logger); } // namespace mmdeploy @@ -86,18 +88,4 @@ void SetLogger(spdlog::logger *logger); #define MMDEPLOY_CRITICAL(...) (void)0; #endif -#undef CRITICAL -#undef ERROR -#undef WARN -#undef INFO -#undef DEBUG -#undef TRACE - -#define CRITICAL MMDEPLOY_CRITICAL -#define ERROR MMDEPLOY_ERROR -#define WARN MMDEPLOY_WARN -#define INFO MMDEPLOY_INFO -#define DEBUG MMDEPLOY_DEBUG -#define TRACE MMDEPLOY_TRACE - #endif // !CORE_LOG_H diff --git a/csrc/core/macro.h b/csrc/core/macro.h index f9822094e..6f52f0b5f 100644 --- a/csrc/core/macro.h +++ b/csrc/core/macro.h @@ -3,34 +3,119 @@ #ifndef MMDEPLOY_SRC_CORE_MARCO_H_ #define MMDEPLOY_SRC_CORE_MARCO_H_ +#ifndef MMDEPLOY_EXPORT #ifdef _MSC_VER -#ifdef SDK_EXPORTS -#define MM_SDK_API __declspec(dllexport) +#define MMDEPLOY_EXPORT __declspec(dllexport) #else -#define MM_SDK_API -#endif -#else /* _MSC_VER */ -#ifdef SDK_EXPORTS -#define MM_SDK_API __attribute__((visibility("default"))) -#else -#define MM_SDK_API +#define MMDEPLOY_EXPORT __attribute__((visibility("default"))) #endif #endif -#ifdef __cplusplus -#define CV_SDK_API extern "C" MM_SDK_API +#ifndef MMDEPLOY_API +#ifdef MMDEPLOY_API_EXPORTS +#define MMDEPLOY_API MMDEPLOY_EXPORT #else -#define 
CV_SDK_API MM_SDK_API +#define MMDEPLOY_API +#endif #endif -#define MMDEPLOY_CONCATENATE_IMPL(s1, s2) s1##s2 -#define MMDEPLOY_CONCATENATE(s1, s2) MMDEPLOY_CONCATENATE_IMPL(s1, s2) +#define _MMDEPLOY_PP_CONCAT_IMPL(s1, s2) s1##s2 +#define MMDEPLOY_PP_CONCAT(s1, s2) _MMDEPLOY_PP_CONCAT_IMPL(s1, s2) // ! Be aware of ODR violation when using __COUNTER__ #ifdef __COUNTER__ -#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_CONCATENATE(str, __COUNTER__) +#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __COUNTER__) #else -#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_CONCATENATE(str, __LINE__) +#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __LINE__) #endif +#define MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_NARG(__VA_ARGS__, _MMDEPLOY_PP_RESQ_N()) + +#define _MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_ARG_N(__VA_ARGS__) + +#define _MMDEPLOY_PP_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \ + _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \ + _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, \ + _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, \ + _59, _60, _61, _62, _63, N, ...) \ + N + +#define _MMDEPLOY_PP_RESQ_N() \ + 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, \ + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 + +#define MMDEPLOY_PP_MAP_1(f, x) f(x) +#define MMDEPLOY_PP_MAP_2(f, x, ...) f(x), MMDEPLOY_PP_MAP_1(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_3(f, x, ...) f(x), MMDEPLOY_PP_MAP_2(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_4(f, x, ...) f(x), MMDEPLOY_PP_MAP_3(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_5(f, x, ...) f(x), MMDEPLOY_PP_MAP_4(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_6(f, x, ...) f(x), MMDEPLOY_PP_MAP_5(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_7(f, x, ...) 
f(x), MMDEPLOY_PP_MAP_6(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_8(f, x, ...) f(x), MMDEPLOY_PP_MAP_7(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_9(f, x, ...) f(x), MMDEPLOY_PP_MAP_8(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_10(f, x, ...) f(x), MMDEPLOY_PP_MAP_9(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_11(f, x, ...) f(x), MMDEPLOY_PP_MAP_10(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_12(f, x, ...) f(x), MMDEPLOY_PP_MAP_11(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_13(f, x, ...) f(x), MMDEPLOY_PP_MAP_12(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_14(f, x, ...) f(x), MMDEPLOY_PP_MAP_13(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_15(f, x, ...) f(x), MMDEPLOY_PP_MAP_14(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_16(f, x, ...) f(x), MMDEPLOY_PP_MAP_15(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_17(f, x, ...) f(x), MMDEPLOY_PP_MAP_16(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_18(f, x, ...) f(x), MMDEPLOY_PP_MAP_17(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_19(f, x, ...) f(x), MMDEPLOY_PP_MAP_18(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_20(f, x, ...) f(x), MMDEPLOY_PP_MAP_19(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_21(f, x, ...) f(x), MMDEPLOY_PP_MAP_20(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_22(f, x, ...) f(x), MMDEPLOY_PP_MAP_21(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_23(f, x, ...) f(x), MMDEPLOY_PP_MAP_22(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_24(f, x, ...) f(x), MMDEPLOY_PP_MAP_23(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_25(f, x, ...) f(x), MMDEPLOY_PP_MAP_24(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_26(f, x, ...) f(x), MMDEPLOY_PP_MAP_25(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_27(f, x, ...) f(x), MMDEPLOY_PP_MAP_26(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_28(f, x, ...) f(x), MMDEPLOY_PP_MAP_27(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_29(f, x, ...) f(x), MMDEPLOY_PP_MAP_28(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_30(f, x, ...) f(x), MMDEPLOY_PP_MAP_29(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_31(f, x, ...) f(x), MMDEPLOY_PP_MAP_30(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_32(f, x, ...) 
f(x), MMDEPLOY_PP_MAP_31(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_33(f, x, ...) f(x), MMDEPLOY_PP_MAP_32(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_34(f, x, ...) f(x), MMDEPLOY_PP_MAP_33(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_35(f, x, ...) f(x), MMDEPLOY_PP_MAP_34(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_36(f, x, ...) f(x), MMDEPLOY_PP_MAP_35(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_37(f, x, ...) f(x), MMDEPLOY_PP_MAP_36(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_38(f, x, ...) f(x), MMDEPLOY_PP_MAP_37(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_39(f, x, ...) f(x), MMDEPLOY_PP_MAP_38(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_40(f, x, ...) f(x), MMDEPLOY_PP_MAP_39(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_41(f, x, ...) f(x), MMDEPLOY_PP_MAP_40(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_42(f, x, ...) f(x), MMDEPLOY_PP_MAP_41(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_43(f, x, ...) f(x), MMDEPLOY_PP_MAP_42(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_44(f, x, ...) f(x), MMDEPLOY_PP_MAP_43(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_45(f, x, ...) f(x), MMDEPLOY_PP_MAP_44(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_46(f, x, ...) f(x), MMDEPLOY_PP_MAP_45(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_47(f, x, ...) f(x), MMDEPLOY_PP_MAP_46(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_48(f, x, ...) f(x), MMDEPLOY_PP_MAP_47(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_49(f, x, ...) f(x), MMDEPLOY_PP_MAP_48(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_50(f, x, ...) f(x), MMDEPLOY_PP_MAP_49(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_51(f, x, ...) f(x), MMDEPLOY_PP_MAP_50(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_52(f, x, ...) f(x), MMDEPLOY_PP_MAP_51(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_53(f, x, ...) f(x), MMDEPLOY_PP_MAP_52(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_54(f, x, ...) f(x), MMDEPLOY_PP_MAP_53(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_55(f, x, ...) f(x), MMDEPLOY_PP_MAP_54(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_56(f, x, ...) f(x), MMDEPLOY_PP_MAP_55(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_57(f, x, ...) 
f(x), MMDEPLOY_PP_MAP_56(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_58(f, x, ...) f(x), MMDEPLOY_PP_MAP_57(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_59(f, x, ...) f(x), MMDEPLOY_PP_MAP_58(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_60(f, x, ...) f(x), MMDEPLOY_PP_MAP_59(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_61(f, x, ...) f(x), MMDEPLOY_PP_MAP_60(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_62(f, x, ...) f(x), MMDEPLOY_PP_MAP_61(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_63(f, x, ...) f(x), MMDEPLOY_PP_MAP_62(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_64(f, x, ...) f(x), MMDEPLOY_PP_MAP_63(f, __VA_ARGS__) + +#define MMDEPLOY_PP_MAP(f, ...) \ + _MMDEPLOY_PP_MAP_IMPL1(f, MMDEPLOY_PP_NARG(__VA_ARGS__), __VA_ARGS__) + +#define _MMDEPLOY_PP_MAP_IMPL1(f, n, ...) \ + _MMDEPLOY_PP_MAP_IMPL2(f, MMDEPLOY_PP_CONCAT(MMDEPLOY_PP_MAP_, n), __VA_ARGS__) + +#define _MMDEPLOY_PP_MAP_IMPL2(f, M_, ...) M_(f, __VA_ARGS__) + #endif // MMDEPLOY_SRC_CORE_MARCO_H_ diff --git a/csrc/core/mat.h b/csrc/core/mat.h index d6ec4ba20..4702df7e9 100644 --- a/csrc/core/mat.h +++ b/csrc/core/mat.h @@ -11,7 +11,7 @@ namespace mmdeploy { -class Mat final { +class MMDEPLOY_API Mat final { public: Mat() = default; diff --git a/csrc/core/model.cpp b/csrc/core/model.cpp index 080504a2a..d4b6361a9 100644 --- a/csrc/core/model.cpp +++ b/csrc/core/model.cpp @@ -4,14 +4,7 @@ #include "core/logger.h" #include "core/model_impl.h" - -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif +#include "core/utils/filesystem.h" using namespace std; @@ -19,7 +12,7 @@ namespace mmdeploy { Model::Model(const std::string& model_path) { if (auto r = Model::Init(model_path); !r) { - ERROR("load model failed. Its file path is '{}'", model_path); + MMDEPLOY_ERROR("load model failed. 
Its file path is '{}'", model_path); r.error().throw_exception(); } } @@ -28,7 +21,7 @@ Model::Model(const void* buffer, size_t size) { Init(buffer, size).value(); } Result Model::Init(const std::string& model_path) { if (!fs::exists(model_path)) { - ERROR("'{}' doesn't exist", model_path); + MMDEPLOY_ERROR("'{}' doesn't exist", model_path); return Status(eFileNotExist); } @@ -42,13 +35,13 @@ Result Model::Init(const std::string& model_path) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - INFO("{} successfully load sdk model {}", entry.name, model_path); + MMDEPLOY_INFO("{} successfully load sdk model {}", entry.name, model_path); impl_ = std::move(impl); meta_ = std::move(meta); return success(); } - ERROR("no ModelImpl can read sdk_model {}", model_path); + MMDEPLOY_ERROR("no ModelImpl can read sdk_model {}", model_path); return Status(eNotSupported); } @@ -63,13 +56,13 @@ Result Model::Init(const void* buffer, size_t size) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - INFO("{} successfully load sdk model {}", entry.name); + MMDEPLOY_INFO("{} successfully load sdk model {}", entry.name); impl_ = std::move(impl); meta_ = std::move(meta); return success(); } - ERROR("no ModelImpl can parse buffer"); + MMDEPLOY_ERROR("no ModelImpl can parse buffer"); return Status(eNotSupported); } @@ -79,7 +72,7 @@ Result Model::GetModelConfig(const std::string& name) const { return info; } } - ERROR("cannot find model '{}' in meta file", name); + MMDEPLOY_ERROR("cannot find model '{}' in meta file", name); return Status(eEntryNotFound); } @@ -87,14 +80,19 @@ Result Model::ReadFile(const std::string& file_path) noexcept { return impl_->ReadFile(file_path); } +ModelRegistry& ModelRegistry::Get() { + static ModelRegistry inst; + return inst; +} + Result ModelRegistry::Register(const std::string& name, Creator creator) { for (auto& entry : entries_) { if (entry.name == name) { - ERROR("{} is already registered", name); + MMDEPLOY_ERROR("{} is already registered", name); return 
Status(eFail); } } - INFO("Register '{}'", name); + MMDEPLOY_INFO("Register '{}'", name); entries_.push_back({name, std::move(creator)}); return success(); } diff --git a/csrc/core/model.h b/csrc/core/model.h index a9ce11eff..5193128c3 100644 --- a/csrc/core/model.h +++ b/csrc/core/model.h @@ -39,7 +39,7 @@ class ModelImpl; * in case of faster-rcnn model, it splits into two models, one is rpn and the * other is cnn for roi classification. */ -class Model { +class MMDEPLOY_API Model { public: Model() = default; @@ -115,7 +115,7 @@ class Model { * }; * ANewModelImplRegister a_new_model_impl_register; */ -class ModelRegistry { +class MMDEPLOY_API ModelRegistry { public: using Creator = std::function()>; struct Entry { @@ -126,10 +126,7 @@ class ModelRegistry { /** * @brief Return global instance of `ModelRegistry` */ - static ModelRegistry& Get() { - static ModelRegistry inst; - return inst; - } + static ModelRegistry& Get(); /** * @brief Register an sdk model format denoted by an specified `ModelImpl` diff --git a/csrc/core/module.cpp b/csrc/core/module.cpp index d21ea31bd..28857f420 100644 --- a/csrc/core/module.cpp +++ b/csrc/core/module.cpp @@ -6,7 +6,9 @@ namespace mmdeploy { -template class Registry; -template class Creator; +// template class Registry; +// template class Creator; + +MMDEPLOY_DEFINE_REGISTRY(Module); } // namespace mmdeploy diff --git a/csrc/core/module.h b/csrc/core/module.h index 96d0c5cff..6debc6a6d 100644 --- a/csrc/core/module.h +++ b/csrc/core/module.h @@ -4,17 +4,20 @@ #define MMDEPLOY_SRC_CORE_MODULE_H_ #include "core/macro.h" +#include "core/registry.h" #include "core/status_code.h" #include "core/value.h" namespace mmdeploy { -class MM_SDK_API Module { +class MMDEPLOY_API Module { public: virtual ~Module() = default; virtual Result Process(const Value& args) = 0; }; +MMDEPLOY_DECLARE_REGISTRY(Module); + } // namespace mmdeploy #endif // MMDEPLOY_SRC_CORE_MODULE_H_ diff --git a/csrc/core/net.cpp b/csrc/core/net.cpp index 
f54804250..9f057dc88 100644 --- a/csrc/core/net.cpp +++ b/csrc/core/net.cpp @@ -6,7 +6,9 @@ namespace mmdeploy { -template class Registry; -template class Creator; +// template class Registry; +// template class Creator; + +MMDEPLOY_DEFINE_REGISTRY(Net); } // namespace mmdeploy diff --git a/csrc/core/net.h b/csrc/core/net.h index b96551cf9..c49a7ceeb 100644 --- a/csrc/core/net.h +++ b/csrc/core/net.h @@ -22,6 +22,8 @@ class Net { virtual Result ForwardAsync(Event* event) = 0; }; +MMDEPLOY_DECLARE_REGISTRY(Net); + } // namespace mmdeploy #endif // MMDEPLOY_SRC_CORE_NET_H_ diff --git a/csrc/core/operator.cpp b/csrc/core/operator.cpp index c40bfee50..e4e536b2c 100644 --- a/csrc/core/operator.cpp +++ b/csrc/core/operator.cpp @@ -2,6 +2,8 @@ #include "operator.h" +#include + namespace mmdeploy::graph { Result Gather(const Value::Array& array, const vector& idxs, Value::Array& output) { diff --git a/csrc/core/operator.h b/csrc/core/operator.h index 0936fbfaf..c71c62f05 100644 --- a/csrc/core/operator.h +++ b/csrc/core/operator.h @@ -11,13 +11,18 @@ using std::string; using std::tuple; using std::vector; -Result Gather(const Value::Array& array, const vector& idxs, Value::Array& output); -Result Gather(Value::Array&& array, const vector& idxs, Value::Array& output); -Result Gather(const Value::Object& object, const vector& keys, - Value::Array& output); -Result Gather(Value::Object&& object, const vector& keys, Value::Array& output); -Result Scatter(Value::Array array, const vector& idxs, Value::Array& output); -Result Scatter(Value::Array array, const vector& keys, Value::Object& output); +MMDEPLOY_API Result Gather(const Value::Array& array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Gather(Value::Array&& array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Gather(const Value::Object& object, const vector& keys, + Value::Array& output); +MMDEPLOY_API Result Gather(Value::Object&& object, const vector& keys, + Value::Array& 
output); +MMDEPLOY_API Result Scatter(Value::Array array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Scatter(Value::Array array, const vector& keys, + Value::Object& output); inline Result Gather(const Value::Array& array, const vector& idxs) { Value::Array output; @@ -95,13 +100,13 @@ Result Unflatten(V&& input, const vector& idxs) { } // object of arrays -> array of objects, all arrays must be of same length -Result DistribOA(const Value& oa); +MMDEPLOY_API Result DistribOA(const Value& oa); // array of objects -> object of arrays, all objects must be isomorphic -Result DistribAO(const Value& ao); +MMDEPLOY_API Result DistribAO(const Value& ao); // array of arrays -> array of arrays, this is equivalent to transpose -Result DistribAA(const Value& a); +MMDEPLOY_API Result DistribAA(const Value& a); } // namespace mmdeploy::graph diff --git a/csrc/core/registry.cpp b/csrc/core/registry.cpp new file mode 100644 index 000000000..d0d543ee5 --- /dev/null +++ b/csrc/core/registry.cpp @@ -0,0 +1,46 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
+ +#include "core/registry.h" + +namespace mmdeploy { + +Registry::Registry() = default; + +Registry::~Registry() = default; + +bool Registry::AddCreator(Creator &creator) { + MMDEPLOY_DEBUG("Adding creator: {}", creator.GetName()); + auto key = creator.GetName(); + if (entries_.find(key) == entries_.end()) { + entries_.insert(std::make_pair(key, &creator)); + return true; + } + + for (auto iter = entries_.lower_bound(key); iter != entries_.upper_bound(key); ++iter) { + if (iter->second->GetVersion() == creator.GetVersion()) { + return false; + } + } + + entries_.insert(std::make_pair(key, &creator)); + return true; +} + +Creator *Registry::GetCreator(const std::string &type, int version) { + auto iter = entries_.find(type); + if (iter == entries_.end()) { + return nullptr; + } + if (0 == version) { + return iter->second; + } + + for (auto iter = entries_.lower_bound(type); iter != entries_.upper_bound(type); ++iter) { + if (iter->second->GetVersion() == version) { + return iter->second; + } + } + return nullptr; +} + +} // namespace mmdeploy diff --git a/csrc/core/registry.h b/csrc/core/registry.h index 03d5e3f23..bde878a35 100644 --- a/csrc/core/registry.h +++ b/csrc/core/registry.h @@ -9,6 +9,7 @@ #include #include +#include "macro.h" #include "value.h" namespace mmdeploy { @@ -30,73 +31,58 @@ using get_return_type_t = typename get_return_type::type; } // namespace detail +template +class Creator; + +template <> +class Creator { + public: + virtual ~Creator() = default; + virtual const char *GetName() const = 0; + virtual int GetVersion() const { return 0; } +}; + template -class Creator { +class Creator : public Creator { public: using ReturnType = detail::get_return_type_t; public: - virtual ~Creator() = default; - virtual const char *GetName() const = 0; - virtual int GetVersion() const = 0; virtual ReturnType Create(const Value &args) = 0; }; -template -class Registry { +template +class Registry; + +template <> +class MMDEPLOY_API Registry { public: - static 
Registry &Get() { - static Registry registry; - return registry; - } + Registry(); - bool AddCreator(Creator &creator) { - auto key = creator.GetName(); - if (entries_.find(key) == entries_.end()) { - entries_.insert(std::make_pair(key, &creator)); - return true; - } - - for (auto iter = entries_.lower_bound(key); iter != entries_.upper_bound(key); ++iter) { - if (iter->second->GetVersion() == creator.GetVersion()) { - return false; - } - } - - entries_.insert(std::make_pair(key, &creator)); - return true; - } + ~Registry(); + + bool AddCreator(Creator &creator); + + Creator *GetCreator(const std::string &type, int version = 0); + + private: + std::multimap *> entries_; +}; + +template +class Registry : public Registry { + public: + bool AddCreator(Creator &creator) { return Registry::AddCreator(creator); } Creator *GetCreator(const std::string &type, int version = 0) { - auto iter = entries_.find(type); - if (iter == entries_.end()) { - return nullptr; - } - if (0 == version) { - return iter->second; - } - - for (auto iter = entries_.lower_bound(type); iter != entries_.upper_bound(type); ++iter) { - if (iter->second->GetVersion() == version) { - return iter->second; - } - } - return nullptr; + auto creator = Registry::GetCreator(type, version); + return static_cast *>(creator); } - std::vector ListCreators() { - std::vector keys; - for (const auto &[key, _] : entries_) { - keys.push_back(key); - } - return keys; - } + static Registry &Get(); private: Registry() = default; - - private: - std::multimap *> entries_; }; template @@ -110,6 +96,17 @@ class Registerer { } // namespace mmdeploy +#define MMDEPLOY_DECLARE_REGISTRY(EntryType) \ + template <> \ + Registry &Registry::Get(); + +#define MMDEPLOY_DEFINE_REGISTRY(EntryType) \ + template <> \ + MMDEPLOY_EXPORT Registry &Registry::Get() { \ + static Registry v; \ + return v; \ + } + #define REGISTER_MODULE(EntryType, CreatorType) \ static ::mmdeploy::Registerer g_register_##EntryType##_##CreatorType{}; diff --git 
a/csrc/core/serialization.h b/csrc/core/serialization.h index aeea43bb5..6a37d8c3f 100644 --- a/csrc/core/serialization.h +++ b/csrc/core/serialization.h @@ -8,47 +8,14 @@ #include #include +#include "core/macro.h" #include "core/status_code.h" #include "mpl/detected.h" #include "mpl/type_traits.h" namespace mmdeploy { -#define _MMDEPLOY_NTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \ - N, ...) \ - N - -#define _MMDEPLOY_ARCHIVE_1(x) MMDEPLOY_NVP(x) -#define _MMDEPLOY_ARCHIVE_2(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_1(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_3(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_2(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_4(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_3(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_5(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_4(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_6(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_5(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_7(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_6(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_8(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_7(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_9(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_8(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_10(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_9(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_11(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_10(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_12(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_11(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_13(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_12(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_14(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_13(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_15(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_14(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_16(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_15(__VA_ARGS__) - -#define _MMDEPLOY_ARCHIVE_DISPATCH(...) 
\ - _MMDEPLOY_NTH_ARG(__VA_ARGS__, _MMDEPLOY_ARCHIVE_16(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_15(__VA_ARGS__), _MMDEPLOY_ARCHIVE_14(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_13(__VA_ARGS__), _MMDEPLOY_ARCHIVE_12(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_11(__VA_ARGS__), _MMDEPLOY_ARCHIVE_10(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_9(__VA_ARGS__), _MMDEPLOY_ARCHIVE_8(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_7(__VA_ARGS__), _MMDEPLOY_ARCHIVE_6(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_5(__VA_ARGS__), _MMDEPLOY_ARCHIVE_4(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_3(__VA_ARGS__), _MMDEPLOY_ARCHIVE_2(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_1(__VA_ARGS__)) - -#define MMDEPLOY_ARCHIVE_NVP(archive, ...) archive(_MMDEPLOY_ARCHIVE_DISPATCH(__VA_ARGS__)) - -#define MMDEPLOY_ARCHIVE(archive, ...) archive(__VA_ARGS__) +#define MMDEPLOY_ARCHIVE_NVP(archive, ...) archive(MMDEPLOY_PP_MAP(MMDEPLOY_NVP, __VA_ARGS__)) #define MMDEPLOY_ARCHIVE_MEMBERS(...) \ template \ @@ -273,32 +240,26 @@ void load(Archive &&archive, T &&object) { } } -template -using save_t = decltype(save(std::declval(), std::declval())); - struct save_fn { template - auto operator()(Archive &&a, T &&v) const -> save_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(save(std::forward(a), std::forward(v))) { return save(std::forward(a), std::forward(v)); } }; -template -using load_t = decltype(load(std::declval(), std::declval())); - struct load_fn { template - auto operator()(Archive &&a, T &&v) const -> load_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(load(std::forward(a), std::forward(v))) { return load(std::forward(a), std::forward(v)); } }; -template -using serialize_t = decltype(serialize(std::declval(), std::declval())); - struct serialize_fn { template - auto operator()(Archive &&a, T &&v) const -> serialize_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(serialize(std::forward(a), std::forward(v))) { return serialize(std::forward(a), std::forward(v)); } }; @@ -319,15 +280,18 @@ struct adl_serializer; 
template struct adl_serializer { template - static auto save(Archive &&a, T &&v) -> detail::save_t { + static auto save(Archive &&a, T &&v) + -> decltype(::mmdeploy::save(std::forward(a), std::forward(v))) { ::mmdeploy::save(std::forward(a), std::forward(v)); } template - static auto load(Archive &&a, T &&v) -> detail::load_t { + static auto load(Archive &&a, T &&v) + -> decltype(::mmdeploy::load(std::forward(a), std::forward(v))) { ::mmdeploy::load(std::forward(a), std::forward(v)); } template - static auto serialize(Archive &&a, T &&v) -> detail::serialize_t { + static auto serialize(Archive &&a, T &&v) + -> decltype(::mmdeploy::serialize(std::forward(a), std::forward(v))) { ::mmdeploy::serialize(std::forward(a), std::forward(v)); } }; diff --git a/csrc/core/status_code.h b/csrc/core/status_code.h index 3b719e677..f84ce8a88 100644 --- a/csrc/core/status_code.h +++ b/csrc/core/status_code.h @@ -5,6 +5,7 @@ #include +#include "core/macro.h" #include "outcome-experimental.hpp" #if MMDEPLOY_STATUS_USE_SOURCE_LOCATION #include "utils/source_location.h" @@ -71,7 +72,7 @@ inline const char *to_string(ErrorCode code) { } } -struct Status { +struct MMDEPLOY_API Status { ErrorCode ec{}; Status() = default; SYSTEM_ERROR2_NAMESPACE::status_code_domain::string_ref message() const; @@ -94,7 +95,7 @@ class StatusDomain; using StatusCode = SYSTEM_ERROR2_NAMESPACE::status_code; -class StatusDomain : public SYSTEM_ERROR2_NAMESPACE::status_code_domain { +class MMDEPLOY_API StatusDomain : public SYSTEM_ERROR2_NAMESPACE::status_code_domain { using _base = status_code_domain; public: diff --git a/csrc/core/tensor.cpp b/csrc/core/tensor.cpp index aed5d6c3e..6a040ce04 100644 --- a/csrc/core/tensor.cpp +++ b/csrc/core/tensor.cpp @@ -87,16 +87,16 @@ void Tensor::Reshape(const TensorShape& shape) { Result Tensor::CopyFrom(const Tensor& tensor, Stream stream) { if (desc_.shape.empty() || tensor.desc().shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized 
tensor"); return Status(eInvalidArgument); } if (!(desc_.shape == tensor.desc().shape)) { - ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), - shape_string(tensor.desc().shape)); + MMDEPLOY_ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), + shape_string(tensor.desc().shape)); return Status(eShapeMismatch); } if (desc_.data_type != tensor.desc().data_type) { - ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); + MMDEPLOY_ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); return Status(eShapeMismatch); } Allocate(); @@ -112,17 +112,17 @@ Result Tensor::CopyFrom(const Tensor& tensor, Stream stream) { Result Tensor::CopyTo(Tensor& tensor, Stream stream) const { if (desc_.shape.empty() || tensor.desc().shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } if (!(desc_.shape == tensor.desc().shape)) { - ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), - shape_string(tensor.desc().shape)); + MMDEPLOY_ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), + shape_string(tensor.desc().shape)); return Status(eShapeMismatch); } if (desc_.data_type != tensor.desc().data_type) { - ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); + MMDEPLOY_ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); return Status(eShapeMismatch); } tensor.Allocate(); @@ -140,7 +140,7 @@ Result Tensor::CopyFrom(void* host_ptr, Stream stream) { return Status(eInvalidArgument); } if (desc_.shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } Allocate(); @@ -157,7 +157,7 @@ Result Tensor::CopyTo(void* host_ptr, Stream stream) const { return Status(eInvalidArgument); } if (desc_.shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return 
Status(eInvalidArgument); } if (!stream) { diff --git a/csrc/core/tensor.h b/csrc/core/tensor.h index 264c6d7b8..78be82ad3 100644 --- a/csrc/core/tensor.h +++ b/csrc/core/tensor.h @@ -19,7 +19,7 @@ struct TensorDesc { std::string name; }; -class Tensor { +class MMDEPLOY_API Tensor { public: Tensor() = default; Tensor(const Tensor&) = default; diff --git a/csrc/core/utils/device_utils.h b/csrc/core/utils/device_utils.h index 81621efec..65422664e 100644 --- a/csrc/core/utils/device_utils.h +++ b/csrc/core/utils/device_utils.h @@ -14,7 +14,8 @@ namespace mmdeploy { * @param stream * @return */ -Result MakeAvailableOnDevice(const Mat& src, const Device& device, Stream& stream); +MMDEPLOY_API Result MakeAvailableOnDevice(const Mat& src, const Device& device, + Stream& stream); /** * @@ -23,7 +24,8 @@ Result MakeAvailableOnDevice(const Mat& src, const Device& device, Stream& * @param stream * @return */ -Result MakeAvailableOnDevice(const Tensor& src, const Device& device, Stream& stream); +MMDEPLOY_API Result MakeAvailableOnDevice(const Tensor& src, const Device& device, + Stream& stream); } // namespace mmdeploy #endif // MMDEPLOY_TRANSFORM_UTILS_H diff --git a/csrc/core/utils/filesystem.h b/csrc/core/utils/filesystem.h new file mode 100644 index 000000000..7aca6a8d8 --- /dev/null +++ b/csrc/core/utils/filesystem.h @@ -0,0 +1,15 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ +#define MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ + +// TODO: what about clang? 
+#if __GNUC__ >= 8 || _MSC_VER +#include +namespace fs = std::filesystem; +#else +#include +namespace fs = std::experimental::filesystem; +#endif + +#endif // MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ diff --git a/csrc/core/utils/formatter.h b/csrc/core/utils/formatter.h index 14075b386..af28f8c9c 100644 --- a/csrc/core/utils/formatter.h +++ b/csrc/core/utils/formatter.h @@ -13,7 +13,7 @@ namespace mmdeploy { class Value; -std::string format_value(const Value& value); +MMDEPLOY_API std::string format_value(const Value& value); } // namespace mmdeploy diff --git a/csrc/core/utils/source_location.h b/csrc/core/utils/source_location.h index b7362cc12..f0d579b76 100644 --- a/csrc/core/utils/source_location.h +++ b/csrc/core/utils/source_location.h @@ -3,7 +3,7 @@ #ifndef MMDEPLOY_SRC_UTILS_SOURCE_LOCATION_H_ #define MMDEPLOY_SRC_UTILS_SOURCE_LOCATION_H_ -#if __has_include() +#if __has_include() && !_MSC_VER #include namespace mmdeploy { using SourceLocation = std::source_location; diff --git a/csrc/core/utils/stacktrace.h b/csrc/core/utils/stacktrace.h index 1c1cacbb7..53b1a44b9 100644 --- a/csrc/core/utils/stacktrace.h +++ b/csrc/core/utils/stacktrace.h @@ -4,6 +4,7 @@ #define MMDEPLOY_SRC_CORE_STACKTRACE_H_ #include +#include namespace mmdeploy { diff --git a/csrc/core/value.h b/csrc/core/value.h index b73fba430..324133056 100644 --- a/csrc/core/value.h +++ b/csrc/core/value.h @@ -650,22 +650,22 @@ class Value { template bool contains(Key&& key) const { - return _unwrap().template _contains(std::forward(key)); + return _unwrap()._contains(std::forward(key)); } template iterator find(Key&& key) { - return _unwrap().template _find(std::forward(key)); + return _unwrap()._find(std::forward(key)); } template const_iterator find(Key&& key) const { - return _unwrap().template _find(std::forward(key)); + return _unwrap()._find(std::forward(key)); } template T value(const typename Object::key_type& key, const T& default_value) const { - return _unwrap().template _value(key, 
default_value); + return _unwrap()._value(key, default_value); } iterator begin() { return _unwrap()._begin(); } diff --git a/csrc/device/cpu/CMakeLists.txt b/csrc/device/cpu/CMakeLists.txt index 226d0894b..f7e7c4611 100644 --- a/csrc/device/cpu/CMakeLists.txt +++ b/csrc/device/cpu/CMakeLists.txt @@ -2,10 +2,14 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_cpu_device) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PUBLIC pthread PRIVATE mmdeploy::core) + +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) +target_link_libraries(${PROJECT_NAME} PRIVATE Threads::Threads) + add_library(mmdeploy::device::cpu ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/device/cuda/CMakeLists.txt b/csrc/device/cuda/CMakeLists.txt index 6a36e513b..1ac67bb8b 100644 --- a/csrc/device/cuda/CMakeLists.txt +++ b/csrc/device/cuda/CMakeLists.txt @@ -9,17 +9,13 @@ if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.18.0") cmake_policy(SET CMP0104 OLD) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -set_targets(${PROJECT_NAME} CUDA_DEVICE_OBJ CUDA_DEVICE_STATIC CUDA_DEVICE_SHARED) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS cuda_device.cpp cuda_builtin_kernels.cu) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_include_directories(${PROJECT_NAME} PUBLIC ${CUDA_INCLUDE_DIRS}) target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64) -target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core - PUBLIC cudart cuda) +target_link_libraries(${PROJECT_NAME} PRIVATE cudart cuda) add_library(mmdeploy::device::cuda ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/device/cuda/buddy_allocator.h 
b/csrc/device/cuda/buddy_allocator.h index f94ec1c04..3e26a0edf 100644 --- a/csrc/device/cuda/buddy_allocator.h +++ b/csrc/device/cuda/buddy_allocator.h @@ -25,7 +25,7 @@ class BuddyAllocator { block_count_ = size / block_size_; if (!IsPowerOfTwo(block_count_)) { block_count_ = RoundToPowerOfTwo(block_count_); - WARN("Rounding up block_count to next power of 2 {}", block_count_); + MMDEPLOY_WARN("Rounding up block_count to next power of 2 {}", block_count_); } base_ = LogPowerOfTwo(block_count_); size_ = block_size_ * block_count_; @@ -34,17 +34,18 @@ class BuddyAllocator { free_.resize(base_ + 1); Build(1, 0); Add(1, 0); - ERROR("size = {}, block_size = {}, block_count = {}", size_, block_size_, block_count_); + MMDEPLOY_ERROR("size = {}, block_size = {}, block_count = {}", size_, block_size_, + block_count_); size = size_; for (int i = 0; i <= base_; ++i) { - ERROR("level {}, size = {}", i, size); + MMDEPLOY_ERROR("level {}, size = {}", i, size); size /= 2; } } ~BuddyAllocator() { for (int i = 0; i < free_.size(); ++i) { - ERROR("free_[{}].size(): {}", i, free_[i].size()); + MMDEPLOY_ERROR("free_[{}].size(): {}", i, free_[i].size()); } gDefaultAllocator().Deallocate(memory_, size_); } @@ -62,7 +63,7 @@ class BuddyAllocator { } } if (level < 0) { - WARN("failed to allocate memory size = {} bytes", n); + MMDEPLOY_WARN("failed to allocate memory size = {} bytes", n); return nullptr; } for (; level < n_level; ++level) { @@ -80,7 +81,7 @@ class BuddyAllocator { std::lock_guard lock{mutex_}; auto offset = static_cast(p) - static_cast(memory_); if (offset < 0 || offset % block_size_) { - ERROR("invalid address: {}", p); + MMDEPLOY_ERROR("invalid address: {}", p); } offset /= static_cast(block_size_); auto level = GetLevel(n); diff --git a/csrc/device/cuda/cuda_builtin_kernels.cu b/csrc/device/cuda/cuda_builtin_kernels.cu index c2cf5460c..463da8136 100644 --- a/csrc/device/cuda/cuda_builtin_kernels.cu +++ b/csrc/device/cuda/cuda_builtin_kernels.cu @@ -1,5 +1,7 @@ // 
Copyright (c) OpenMMLab. All rights reserved. +#include + namespace mmdeploy { namespace cuda { @@ -17,8 +19,8 @@ __global__ void FillKernel(void* dst, size_t dst_size, const void* pattern, size int Fill(void* dst, size_t dst_size, const void* pattern, size_t pattern_size, cudaStream_t stream) { - const uint n_threads = 256; - const uint n_blocks = (dst_size + n_threads - 1) / n_threads; + const unsigned int n_threads = 256; + const unsigned int n_blocks = (dst_size + n_threads - 1) / n_threads; FillKernel<<>>(dst, dst_size, pattern, pattern_size); diff --git a/csrc/device/cuda/cuda_device.cpp b/csrc/device/cuda/cuda_device.cpp index 768a267d9..9825182e9 100644 --- a/csrc/device/cuda/cuda_device.cpp +++ b/csrc/device/cuda/cuda_device.cpp @@ -79,9 +79,9 @@ Allocator CreateDefaultAllocator() { using namespace device_allocator; AllocatorImplPtr allocator = std::make_shared(); allocator = std::make_shared(allocator, "cudaMalloc"); - allocator = std::make_shared(allocator, -1, .0); + allocator = std::make_shared(allocator, -1, .5); allocator = std::make_shared(allocator, "Tree"); - INFO("Default CUDA allocator initialized"); + MMDEPLOY_INFO("Default CUDA allocator initialized"); return Access::create(allocator); } @@ -265,7 +265,7 @@ void CudaPlatformImpl::PerDeviceData::init() { CudaPlatformImpl::CudaPlatformImpl() { int count{}; if (auto err = cudaGetDeviceCount(&count); err != cudaSuccess) { - ERROR("error getting device count: {}", cudaGetErrorString(err)); + MMDEPLOY_ERROR("error getting device count: {}", cudaGetErrorString(err)); throw_exception(eFail); } per_device_data_storage_.reserve(count); diff --git a/csrc/device/cuda/cuda_device.h b/csrc/device/cuda/cuda_device.h index 71623d42b..d4588d2fd 100644 --- a/csrc/device/cuda/cuda_device.h +++ b/csrc/device/cuda/cuda_device.h @@ -16,6 +16,16 @@ class CudaPlatformImpl : public PlatformImpl { public: CudaPlatformImpl(); + ~CudaPlatformImpl() override { + // The CUDA driver may have already shutdown before the 
platform dtor is called. + // As a workaround, simply leak per device resources and let the driver handle it + // FIXME: maybe a pair of global mmdeploy_init/deinit function would be a + // better solution + for (auto& data : per_device_data_storage_) { + data.release(); + } + } + const char* GetPlatformName() const noexcept override { return "cuda"; } shared_ptr CreateBuffer(Device device) override; diff --git a/csrc/device/cuda/default_allocator.h b/csrc/device/cuda/default_allocator.h index ca4d794e9..a8b2177cc 100644 --- a/csrc/device/cuda/default_allocator.h +++ b/csrc/device/cuda/default_allocator.h @@ -16,11 +16,11 @@ class DefaultAllocator { public: DefaultAllocator() = default; ~DefaultAllocator() { - ERROR("=== CUDA Default Allocator ==="); - ERROR(" Allocation: count={}, size={}MB, time={}ms", alloc_count_, - alloc_size_ / (1024 * 1024.f), alloc_time_ / 1000000.f); - ERROR("Deallocation: count={}, size={}MB, time={}ms", dealloc_count_, - dealloc_size_ / (1024 * 1024.f), dealloc_time_ / 1000000.f); + MMDEPLOY_ERROR("=== CUDA Default Allocator ==="); + MMDEPLOY_ERROR(" Allocation: count={}, size={}MB, time={}ms", alloc_count_, + alloc_size_ / (1024 * 1024.f), alloc_time_ / 1000000.f); + MMDEPLOY_ERROR("Deallocation: count={}, size={}MB, time={}ms", dealloc_count_, + dealloc_size_ / (1024 * 1024.f), dealloc_time_ / 1000000.f); } [[nodiscard]] void* Allocate(std::size_t n) { void* p{}; @@ -29,7 +29,7 @@ class DefaultAllocator { auto t1 = std::chrono::high_resolution_clock::now(); alloc_time_ += (int64_t)std::chrono::duration(t1 - t0).count(); if (ret != cudaSuccess) { - ERROR("error allocating cuda memory: {}", cudaGetErrorString(ret)); + MMDEPLOY_ERROR("error allocating cuda memory: {}", cudaGetErrorString(ret)); return nullptr; } alloc_count_ += 1; @@ -43,7 +43,7 @@ class DefaultAllocator { auto t1 = std::chrono::high_resolution_clock::now(); dealloc_time_ += (int64_t)std::chrono::duration(t1 - t0).count(); if (ret != cudaSuccess) { - ERROR("error 
deallocating cuda memory: {}", cudaGetErrorString(ret)); + MMDEPLOY_ERROR("error deallocating cuda memory: {}", cudaGetErrorString(ret)); return; } dealloc_count_ += 1; diff --git a/csrc/device/cuda/linear_allocator.h b/csrc/device/cuda/linear_allocator.h index 15be01bc3..59133e933 100644 --- a/csrc/device/cuda/linear_allocator.h +++ b/csrc/device/cuda/linear_allocator.h @@ -25,11 +25,11 @@ class LinearAllocator { std::size_t space = base_ + size_ - ptr_; if (std::align(16, n, ptr, space)) { - ERROR("success n={}, total={}, count={}", n, total_, count_); + MMDEPLOY_ERROR("success n={}, total={}, count={}", n, total_, count_); ptr_ = static_cast(ptr) + n; return ptr; } - ERROR("fallback {}, total={}, count={}", n, total_, count_); + MMDEPLOY_ERROR("fallback {}, total={}, count={}", n, total_, count_); return gDefaultAllocator().Allocate(n); } void Deallocate(void* _p, std::size_t n) { @@ -43,7 +43,7 @@ class LinearAllocator { } total_ -= n; --count_; - ERROR("deallocate total={}, count={}", total_, count_); + MMDEPLOY_ERROR("deallocate total={}, count={}", total_, count_); if (total_ == 0) { assert(count_ == 0); ptr_ = base_; diff --git a/csrc/device/device_allocator.h b/csrc/device/device_allocator.h index 4539e12d4..06bb5730d 100644 --- a/csrc/device/device_allocator.h +++ b/csrc/device/device_allocator.h @@ -162,12 +162,14 @@ class Stats : public AllocatorImpl { : allocator_(std::move(allocator)), name_(std::move(name)) {} ~Stats() override { - INFO("=== {} ===", name_); - INFO(" Allocation: count={}, size={}MB, time={}ms", data_.allocation_count, - data_.allocated_bytes / (1024 * 1024.f), static_cast(data_.allocation_time)); - INFO("Deallocation: count={}, size={}MB, time={}ms", data_.deallocation_count, - data_.deallocated_bytes / (1024 * 1024.f), static_cast(data_.deallocation_time)); - INFO("Peak memory usage: size={}MB", data_.peak / (1024 * 1024.f)); + MMDEPLOY_INFO("=== {} ===", name_); + MMDEPLOY_INFO(" Allocation: count={}, size={}MB, time={}ms", 
data_.allocation_count, + data_.allocated_bytes / (1024 * 1024.f), + static_cast(data_.allocation_time)); + MMDEPLOY_INFO("Deallocation: count={}, size={}MB, time={}ms", data_.deallocation_count, + data_.deallocated_bytes / (1024 * 1024.f), + static_cast(data_.deallocation_time)); + MMDEPLOY_INFO("Peak memory usage: size={}MB", data_.peak / (1024 * 1024.f)); } Block Allocate(size_t size) noexcept override { @@ -281,10 +283,10 @@ class Bucketizer : public AllocatorImpl { Bucketizer(const AllocatorCreator& creator, size_t min_size, size_t max_size, size_t step_size) : min_size_(min_size), max_size_(max_size), step_size_(step_size) { for (auto base = min_size_; base < max_size_; base += step_size_) { - // ERROR("{}, {}", base, base + step_size - 1); + // MMDEPLOY_ERROR("{}, {}", base, base + step_size - 1); allocator_.push_back(creator(base, base + step_size - 1)); } - // ERROR("{}", allocator_.size()); + // MMDEPLOY_ERROR("{}", allocator_.size()); } Block Allocate(size_t size) noexcept override { diff --git a/csrc/experimental/collection.h b/csrc/experimental/collection.h index e26be6da9..a65de2ec6 100644 --- a/csrc/experimental/collection.h +++ b/csrc/experimental/collection.h @@ -1,93 +1,93 @@ -// Copyright (c) OpenMMLab. All rights reserved. 
- -#ifndef MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ -#define MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ - -#include "token.h" - -namespace mmdeploy { - -class Collection { - public: - template - friend Collection& operator<<(Collection& c, const Token& value) { - c.put(value); - return c; - } - - template - friend const Collection& operator>>(const Collection& c, Token& value) { - c.get(value); - return c; - } - - template - Result maybe() const { - T token; - if (get(token)) { - return token; - } - return Status(eFail); - } - - private: - std::vector keys_; - std::vector> values_; - - template - void put(const Token& value) { - keys_.push_back(Token::key()); - values_.push_back(std::make_shared>(value)); - } - - template - bool get(Token& value) const { - for (int i = 0; i < keys_.size(); ++i) { - if (keys_[i] == Token::key()) { - value = *static_cast*>(values_[i].get()); - return true; - } - } - return false; - } -}; - -namespace detail { - -template -struct function_traits { - template - static std::tuple get_args(std::function); - - template - static R get_ret(std::function); - - using args_t = decltype(get_args(std::function{std::declval()})); - using ret_t = decltype(get_ret(std::function{std::declval()})); -}; - -// TODO: obtain first error -// TODO: combine all errors -template > -Result Apply(F&& f, const Result&... args) { - if ((... && args)) { - return std::invoke(std::forward(f), args.value()...); - } - return Status(eFail); -} - -template > -Result ApplyImpl(F&& f, const Collection& c, std::tuple*) { - return Apply(std::forward(f), c.maybe()...); -} - -} // namespace detail - -template ::args_t> -decltype(auto) Apply(F&& f, const Collection& c) { - return detail::ApplyImpl(std::forward(f), c, std::add_pointer_t{}); -} - -} // namespace mmdeploy - -#endif // MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +//// Copyright (c) OpenMMLab. All rights reserved. 
+// +//#ifndef MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +//#define MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +// +//#include "token.h" +// +// namespace mmdeploy { +// +// class Collection { +// public: +// template +// friend Collection& operator<<(Collection& c, const Token& value) { +// c.put(value); +// return c; +// } +// +// template +// friend const Collection& operator>>(const Collection& c, Token& value) { +// c.get(value); +// return c; +// } +// +// template +// Result maybe() const { +// T token; +// if (get(token)) { +// return token; +// } +// return Status(eFail); +// } +// +// private: +// std::vector keys_; +// std::vector> values_; +// +// template +// void put(const Token& value) { +// keys_.push_back(Token::key()); +// values_.push_back(std::make_shared>(value)); +// } +// +// template +// bool get(Token& value) const { +// for (int i = 0; i < keys_.size(); ++i) { +// if (keys_[i] == Token::key()) { +// value = *static_cast*>(values_[i].get()); +// return true; +// } +// } +// return false; +// } +//}; +// +// namespace detail { +// +// template +// struct function_traits { +// template +// static std::tuple get_args(std::function); +// +// template +// static R get_ret(std::function); +// +// using args_t = decltype(get_args(std::function{std::declval()})); +// using ret_t = decltype(get_ret(std::function{std::declval()})); +//}; +// +//// TODO: obtain first error +//// TODO: combine all errors +// template > +// Result Apply(F&& f, const Result&... args) { +// if ((... 
&& args)) { +// return std::invoke(std::forward(f), args.value()...); +// } +// return Status(eFail); +// } +// +// template > +// Result ApplyImpl(F&& f, const Collection& c, std::tuple*) { +// return Apply(std::forward(f), c.maybe()...); +// } +// +// } // namespace detail +// +// template ::args_t> +// decltype(auto) Apply(F&& f, const Collection& c) { +// return detail::ApplyImpl(std::forward(f), c, std::add_pointer_t{}); +// } +// +// } // namespace mmdeploy +// +//#endif // MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ diff --git a/csrc/experimental/module_adapter.h b/csrc/experimental/module_adapter.h index 8d652cfb4..581e7f2be 100644 --- a/csrc/experimental/module_adapter.h +++ b/csrc/experimental/module_adapter.h @@ -31,7 +31,7 @@ struct InvokeImpl { std::forward(ts)...); return make_ret_val(std::move(ret)); } catch (const std::exception& e) { - ERROR("unhandled exception: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception: {}", e.what()); return Status(eFail); } catch (...) { return Status(eFail); diff --git a/csrc/experimental/token.h b/csrc/experimental/token.h index e1c951352..6d6ae7f88 100644 --- a/csrc/experimental/token.h +++ b/csrc/experimental/token.h @@ -1,72 +1,72 @@ -// Copyright (c) OpenMMLab. All rights reserved. - -#ifndef MMDEPLOY_SRC_TOKEN_TOKEN_H_ -#define MMDEPLOY_SRC_TOKEN_TOKEN_H_ - -#include -#include -#include -#include -#include -#include - -#include "core/status_code.h" - -namespace mmdeploy { - -namespace token { - -template -using String = std::integer_sequence; - -// this is a GCC only extension -template -constexpr String operator""_ts() { - return {}; -} - -template -const char* c_str(String) { - static constexpr const char str[sizeof...(cs) + 1] = {cs..., '\0'}; - return str; -} - -} // namespace token - -// template -// static void* signature() { -// static char id = 0; -// return &id; +//// Copyright (c) OpenMMLab. All rights reserved. 
+// +//#ifndef MMDEPLOY_SRC_TOKEN_TOKEN_H_ +//#define MMDEPLOY_SRC_TOKEN_TOKEN_H_ +// +//#include +//#include +//#include +//#include +//#include +//#include +// +//#include "core/status_code.h" +// +// namespace mmdeploy { +// +// namespace token { +// +// template +// using String = std::integer_sequence; +// +//// this is a GCC only extension +// template +// constexpr String operator""_ts() { +// return {}; // } // -// using signature_t = decltype(signature()); - -template -struct Token { - using signature_t = void*; - using value_type = T; - - Token(T value = {}) : value_(value) {} // NOLINT - - operator T() const { return value_; } // NOLINT - static const char* key() { return token::c_str(Key{}); } - - T& operator*() { return value_; } - T* operator->() { return &value_; } - - private: - T value_; -}; - -template -class Identifier { - public: - constexpr explicit Identifier(const char* key) : key_(key) {} - const char* key_; -}; - -constexpr inline Identifier batch_size{"batch_size"}; - -} // namespace mmdeploy - -#endif // MMDEPLOY_SRC_TOKEN_TOKEN_H_ +// template +// const char* c_str(String) { +// static constexpr const char str[sizeof...(cs) + 1] = {cs..., '\0'}; +// return str; +// } +// +// } // namespace token +// +//// template +//// static void* signature() { +//// static char id = 0; +//// return &id; +//// } +//// +//// using signature_t = decltype(signature()); +// +// template +// struct Token { +// using signature_t = void*; +// using value_type = T; +// +// Token(T value = {}) : value_(value) {} // NOLINT +// +// operator T() const { return value_; } // NOLINT +// static const char* key() { return token::c_str(Key{}); } +// +// T& operator*() { return value_; } +// T* operator->() { return &value_; } +// +// private: +// T value_; +//}; +// +// template +// class Identifier { +// public: +// constexpr explicit Identifier(const char* key) : key_(key) {} +// const char* key_; +//}; +// +// constexpr inline Identifier batch_size{"batch_size"}; +// 
+//} // namespace mmdeploy +// +//#endif // MMDEPLOY_SRC_TOKEN_TOKEN_H_ diff --git a/csrc/graph/CMakeLists.txt b/csrc/graph/CMakeLists.txt index a0c09946e..e39fbcf5c 100644 --- a/csrc/graph/CMakeLists.txt +++ b/csrc/graph/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_graph) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS common.cpp inference.cpp @@ -10,7 +10,5 @@ set(SRCS task.cpp flatten.cpp unflatten.cpp) -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") add_library(mmdeploy::graph ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/graph/common.cpp b/csrc/graph/common.cpp index 7881b91f8..fba4b2361 100644 --- a/csrc/graph/common.cpp +++ b/csrc/graph/common.cpp @@ -10,7 +10,7 @@ mmdeploy::graph::BaseNode::BaseNode(const mmdeploy::Value& cfg) { from_value(cfg["output"], outputs_); name_ = cfg.value("name", ""); } catch (...) 
{ - ERROR("error parsing config: {}", cfg); + MMDEPLOY_ERROR("error parsing config: {}", cfg); throw; } } diff --git a/csrc/graph/common.h b/csrc/graph/common.h index 95e8c2d08..a94d8c5ff 100644 --- a/csrc/graph/common.h +++ b/csrc/graph/common.h @@ -12,15 +12,16 @@ namespace mmdeploy::graph { template ::ReturnType> inline Result CreateFromRegistry(const Value& config, const char* key = "type") { - INFO("config: {}", config); + MMDEPLOY_INFO("config: {}", config); auto type = config[key].get(); auto creator = Registry::Get().GetCreator(type); if (!creator) { + MMDEPLOY_ERROR("failed to find module creator: {}", type); return Status(eEntryNotFound); } auto inst = creator->Create(config); if (!inst) { - ERROR("failed to create module: {}", type); + MMDEPLOY_ERROR("failed to create module: {}", type); return Status(eFail); } return std::move(inst); diff --git a/csrc/graph/flatten.cpp b/csrc/graph/flatten.cpp index 3153e586b..d180470eb 100644 --- a/csrc/graph/flatten.cpp +++ b/csrc/graph/flatten.cpp @@ -20,7 +20,7 @@ void FlattenNode::Build(TaskGraph& graph) { if (idxs.empty()) { idxs = std::move(idx); } else if (idx != idxs) { - ERROR("args does not have same structure"); + MMDEPLOY_ERROR("args does not have same structure"); return Status(eInvalidArgument); } rets.push_back(std::move(ret)); diff --git a/csrc/graph/inference.cpp b/csrc/graph/inference.cpp index 34d9c8a37..15c11d581 100644 --- a/csrc/graph/inference.cpp +++ b/csrc/graph/inference.cpp @@ -17,7 +17,7 @@ Inference::Inference(const Value& cfg) : BaseNode(cfg) { auto model_path = model_value.get(); model_ = Model(model_path); } else { - ERROR("unsupported model specification"); + MMDEPLOY_ERROR("unsupported model specification"); throw_exception(eInvalidArgument); } @@ -31,7 +31,7 @@ Inference::Inference(const Value& cfg) : BaseNode(cfg) { value["context"] = context; pipeline_ = std::make_unique(value); if (!pipeline_) { - ERROR("failed to create pipeline"); + MMDEPLOY_ERROR("failed to create pipeline"); 
throw_exception(eFail); } } diff --git a/csrc/graph/pipeline.cpp b/csrc/graph/pipeline.cpp index ef77a242b..4d668c249 100644 --- a/csrc/graph/pipeline.cpp +++ b/csrc/graph/pipeline.cpp @@ -21,7 +21,7 @@ Pipeline::Pipeline(const Value& cfg) : BaseNode(cfg["pipeline"]) { node_input_idx_.push_back(UpdateBindings(nodes_.back()->inputs(), kRead)); node_output_idx_.push_back(UpdateBindings(nodes_.back()->outputs(), kWrite)); } else { - ERROR("could not create {}:{}", name, type); + MMDEPLOY_ERROR("could not create {}:{}", name, type); throw_exception(eFail); } } @@ -57,7 +57,7 @@ std::vector Pipeline::UpdateBindings(const vector& names, Bind auto it = binding_name_to_idx_.lower_bound(name); if (it == binding_name_to_idx_.end() || it->first != name) { if (type == kRead) { - ERROR("unknown binding name: {}", name); + MMDEPLOY_ERROR("unknown binding name: {}", name); throw_exception(eEntryNotFound); } else { auto index = static_cast(binding_name_to_idx_.size()); diff --git a/csrc/graph/task.cpp b/csrc/graph/task.cpp index 0791a84a0..ee1f0ebf8 100644 --- a/csrc/graph/task.cpp +++ b/csrc/graph/task.cpp @@ -36,7 +36,7 @@ static size_t GetBatchSize(const Value& args) { Task::Task(const Value& cfg) : BaseNode(cfg) { auto module = CreateFromRegistry(cfg, "module"); if (!module) { - ERROR("failed to create task: {}", cfg); + MMDEPLOY_ERROR("failed to create task: {}", cfg); throw_exception(eFail); } module_ = std::move(module).value(); @@ -50,7 +50,8 @@ void Task::Build(TaskGraph& graph) { auto args = ctx.pop().array(); auto rets = Value::Array{}; auto batch_size = GetBatchSize(args); - // ERROR("name: {}, is_batched: {}, INPUT batch_size: {}", name_, is_batched_, batch_size); + // MMDEPLOY_ERROR("name: {}, is_batched: {}, INPUT batch_size: {}", name_, is_batched_, + // batch_size); if (!is_batched_ && batch_size) { rets.resize(outputs_.size(), Value::kArray); if (!is_thread_safe_) { @@ -86,7 +87,7 @@ void Task::Build(TaskGraph& graph) { rets = std::move(tmp).array(); } 
ctx.push(std::move(rets)); - // ERROR("name: {}, is_batched: {}, OUTPUT batch_size: {}", name_, is_batched_, + // MMDEPLOY_ERROR("name: {}, is_batched: {}, OUTPUT batch_size: {}", name_, is_batched_, // GetBatchSize(rets)); return success(); }); diff --git a/csrc/model/CMakeLists.txt b/csrc/model/CMakeLists.txt index fbeaaf605..ebfbf2167 100644 --- a/csrc/model/CMakeLists.txt +++ b/csrc/model/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(model) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(MODEL_NAMES "directory_model") if (${MMDEPLOY_ZIP_MODEL}) @@ -11,18 +11,17 @@ endif () foreach (MODEL_NAME ${MODEL_NAMES}) set(TARGET_MODEL_NAME mmdeploy_${MODEL_NAME}) - build_target(${TARGET_MODEL_NAME} ${MODEL_NAME}_impl.cpp) - target_link_libraries(${TARGET_MODEL_NAME} - PRIVATE mmdeploy::core - PUBLIC stdc++fs) + mmdeploy_add_module(${TARGET_MODEL_NAME} ${MODEL_NAME}_impl.cpp) + if (NOT MSVC) + target_link_libraries(${TARGET_MODEL_NAME} PUBLIC stdc++fs) + endif () if (${MODEL_NAME} STREQUAL "zip_model") find_package(libzip QUIET) if (libzip_FOUND) target_link_libraries(${TARGET_MODEL_NAME} PUBLIC libzip::zip) - else() + else () target_link_libraries(${TARGET_MODEL_NAME} PUBLIC zip) - endif() + endif () endif () add_library(mmdeploy::${MODEL_NAME} ALIAS ${TARGET_MODEL_NAME}) - export_module(${TARGET_MODEL_NAME}) endforeach () diff --git a/csrc/model/directory_model_impl.cpp b/csrc/model/directory_model_impl.cpp index 2de9d5ec2..202eafb12 100644 --- a/csrc/model/directory_model_impl.cpp +++ b/csrc/model/directory_model_impl.cpp @@ -5,17 +5,10 @@ #include "archive/json_archive.h" #include "core/model.h" #include "core/model_impl.h" +#include "core/utils/filesystem.h" using nlohmann::json; -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif - namespace mmdeploy { class DirectoryModelImpl : public ModelImpl { @@ 
-52,7 +45,7 @@ class DirectoryModelImpl : public ModelImpl { from_json(json::parse(deploy_json), meta); return meta; } catch (std::exception& e) { - ERROR("exception happened: {}", e.what()); + MMDEPLOY_ERROR("exception happened: {}", e.what()); return Status(eFail); } } diff --git a/csrc/model/zip_model_impl.cpp b/csrc/model/zip_model_impl.cpp index 0f1479f64..54545860f 100644 --- a/csrc/model/zip_model_impl.cpp +++ b/csrc/model/zip_model_impl.cpp @@ -7,14 +7,8 @@ #include "core/logger.h" #include "core/model.h" #include "core/model_impl.h" +#include "core/utils/filesystem.h" #include "zip.h" -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif using nlohmann::json; @@ -40,10 +34,10 @@ class ZipModelImpl : public ModelImpl { int ret = 0; zip_ = zip_open(model_path.c_str(), 0, &ret); if (ret != 0) { - INFO("open zip file {} failed, ret {}", model_path.c_str(), ret); + MMDEPLOY_INFO("open zip file {} failed, ret {}", model_path.c_str(), ret); return Status(eInvalidArgument); } - INFO("open sdk model file {} successfully", model_path.c_str()); + MMDEPLOY_INFO("open sdk model file {} successfully", model_path.c_str()); return InitZip(); } @@ -70,24 +64,25 @@ class ZipModelImpl : public ModelImpl { auto iter = file_index_.find(file_path); if (iter == file_index_.end()) { - ERROR("cannot find file {} under dir {}", file_path.c_str(), root_dir_.c_str()); + MMDEPLOY_ERROR("cannot find file {} under dir {}", file_path.c_str(), root_dir_.c_str()); return Status(eFail); } index = iter->second; struct zip_file* pzip = zip_fopen_index(zip_, index, 0); if (nullptr == pzip) { - ERROR("read file {} in zip file failed, whose index is {}", file_path.c_str(), index); + MMDEPLOY_ERROR("read file {} in zip file failed, whose index is {}", file_path.c_str(), + index); return Status(eFail); } struct zip_stat stat {}; if ((ret = zip_stat_index(zip_, index, 0, &stat)) < 0) { - ERROR("get stat of file {} 
error, ret {}", file_path.c_str(), ret); + MMDEPLOY_ERROR("get stat of file {} error, ret {}", file_path.c_str(), ret); return Status(eFail); } - DEBUG("file size {}", (int)stat.size); + MMDEPLOY_DEBUG("file size {}", (int)stat.size); std::vector buf(stat.size); if ((ret = zip_fread(pzip, buf.data(), stat.size)) < 0) { - ERROR("read data of file {} error, ret {}", file_path.c_str(), ret); + MMDEPLOY_ERROR("read data of file {} error, ret {}", file_path.c_str(), ret); return Status(eFail); } return std::string(buf.begin(), buf.end()); @@ -100,7 +95,7 @@ class ZipModelImpl : public ModelImpl { from_json(json::parse(deploy_json), meta); return meta; } catch (std::exception& e) { - ERROR("exception happened: {}", e.what()); + MMDEPLOY_ERROR("exception happened: {}", e.what()); return Status(eFail); } } @@ -108,7 +103,7 @@ class ZipModelImpl : public ModelImpl { private: Result InitZip() { int files = zip_get_num_files(zip_); - INFO("there are {} files in sdk model file", files); + MMDEPLOY_INFO("there are {} files in sdk model file", files); if (files == 0) { return Status(eFail); } @@ -119,9 +114,9 @@ class ZipModelImpl : public ModelImpl { fs::path path(stat.name); auto file_name = path.filename().string(); if (file_name == ".") { - DEBUG("{}-th file name is: {}, which is a directory", i, stat.name); + MMDEPLOY_DEBUG("{}-th file name is: {}, which is a directory", i, stat.name); } else { - DEBUG("{}-th file name is: {}, which is a file", i, stat.name); + MMDEPLOY_DEBUG("{}-th file name is: {}, which is a file", i, stat.name); file_index_[file_name] = i; } } diff --git a/csrc/net/CMakeLists.txt b/csrc/net/CMakeLists.txt index f339801c0..f411abe5e 100644 --- a/csrc/net/CMakeLists.txt +++ b/csrc/net/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_net_module) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS) add_subdirectory(trt) @@ -24,7 
+24,5 @@ if ("openvino" IN_LIST MMDEPLOY_TARGET_BACKENDS) add_subdirectory(openvino) endif () -build_target(${PROJECT_NAME} net_module.cpp) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} net_module.cpp) add_library(mmdeploy::net_module ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/ncnn/CMakeLists.txt b/csrc/net/ncnn/CMakeLists.txt index 5e83abbf5..3c0e1ff6e 100644 --- a/csrc/net/ncnn/CMakeLists.txt +++ b/csrc/net/ncnn/CMakeLists.txt @@ -2,19 +2,17 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ncnn_net) -if("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) - find_package(ncnn REQUIRED) - add_library(${PROJECT_NAME} SHARED ncnn_net.cpp) - target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core ncnn) - target_link_libraries( - ${PROJECT_NAME} PRIVATE -Wl,--whole-archive mmdeploy::ncnn_ops::static - -Wl,--no-whole-archive) - add_library(mmdeploy::ncnn_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) -else() - message( - ERROR - "'ncnn_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") -endif() + find_package(ncnn REQUIRED) + + mmdeploy_add_module(${PROJECT_NAME} ncnn_net.cpp) + target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_ncnn_ops_obj) + target_link_libraries(${PROJECT_NAME} PRIVATE ncnn) + add_library(mmdeploy::ncnn_net ALIAS ${PROJECT_NAME}) +else () + message( + ERROR + "'ncnn_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") +endif () diff --git a/csrc/net/ncnn/ncnn_net.cpp b/csrc/net/ncnn/ncnn_net.cpp index 29b37242e..041580d34 100644 --- a/csrc/net/ncnn/ncnn_net.cpp +++ b/csrc/net/ncnn/ncnn_net.cpp @@ -5,6 +5,7 @@ #include "core/logger.h" #include "core/model.h" #include "core/utils/formatter.h" +#include "ncnn_ops_register.h" namespace mmdeploy { @@ -33,6 +34,8 @@ Result 
NCNNNet::Init(const Value& args) { OUTCOME_TRY(params_, model.ReadFile(config.net)); OUTCOME_TRY(weights_, model.ReadFile(config.weights)); + register_mmdeploy_custom_layers(net_); + OUTCOME_TRY(ncnn_status(net_.load_param_mem(params_.c_str()))); net_.load_model(reinterpret_cast(weights_.data())); @@ -107,7 +110,7 @@ class NCNNNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating NCNNNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating NCNNNet: {}", r.error().message().c_str()); return nullptr; } } diff --git a/csrc/net/net_module.cpp b/csrc/net/net_module.cpp index 7ebd78df9..1216d4d7e 100644 --- a/csrc/net/net_module.cpp +++ b/csrc/net/net_module.cpp @@ -24,7 +24,7 @@ struct NetModule::Impl { using Output = std::map; explicit Impl(const Value& args) { - DEBUG("Net Module cfg: {}", args); + MMDEPLOY_DEBUG("Net Module cfg: {}", args); auto init = [&]() -> Result { auto name = args["name"].get(); auto& context = args["context"]; @@ -34,7 +34,7 @@ struct NetModule::Impl { stream_ = context.value("stream", Stream::GetDefault(device_)); auto creator = Registry::Get().GetCreator(config.backend); if (!creator) { - ERROR("Net backend not found: {}", config.backend); + MMDEPLOY_ERROR("Net backend not found: {}", config.backend); return Status(eEntryNotFound); } auto net_cfg = args; @@ -82,13 +82,13 @@ struct NetModule::Impl { return shape; } if (shape[0] != 1) { - ERROR("unsupported shape for batch assemble: {}", shape); + MMDEPLOY_ERROR("unsupported shape for batch assemble: {}", shape); return Status(eNotSupported); } for (int i = 1; i < input.size(); ++i) { auto& sample = input[i]; if (sample.shape() != shape) { - ERROR("shapes are not consistent across the batch"); + MMDEPLOY_ERROR("shapes are not consistent across the batch"); return Status(eNotSupported); } } @@ -122,7 +122,7 @@ struct NetModule::Impl { if (auto it = sample.find(name); it != sample.end()) { tmp.push_back(it->second); } else { - 
ERROR("sample {} missing key {}", i, name); + MMDEPLOY_ERROR("sample {} missing key {}", i, name); return Status(eInvalidArgument); } } @@ -140,7 +140,7 @@ struct NetModule::Impl { auto& src = input_samples[i]; auto& dst = inputs_[i]; if (dst.shape() != input_shapes[i]) { - ERROR("inconsistent input shape, expect {}, got {}", input_shapes[i], dst.shape()); + MMDEPLOY_ERROR("inconsistent input shape, expect {}, got {}", input_shapes[i], dst.shape()); return Status(eFail); } if (src.size() > 1) { @@ -165,7 +165,7 @@ struct NetModule::Impl { if (tmp.size()) { OUTCOME_TRY(t.CopyTo(tmp, stream_)); } else { - WARN("copy skipped due to zero sized tensor"); + MMDEPLOY_WARN("copy skipped due to zero sized tensor"); } if (output.size() > 1) { for (int i = 0; i < output.size(); ++i) { diff --git a/csrc/net/openvino/CMakeLists.txt b/csrc/net/openvino/CMakeLists.txt index 14542aa94..6963739e2 100644 --- a/csrc/net/openvino/CMakeLists.txt +++ b/csrc/net/openvino/CMakeLists.txt @@ -3,15 +3,13 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_openvino_net) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) find_package(InferenceEngine REQUIRED) - add_library(${PROJECT_NAME} SHARED openvino_net.cpp) + mmdeploy_add_module(${PROJECT_NAME} openvino_net.cpp) target_link_libraries(${PROJECT_NAME} PRIVATE - mmdeploy::core ${InferenceEngine_LIBRARIES}) add_library(mmdeploy::openvino_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) else () message(ERROR "'openvino_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") endif () diff --git a/csrc/net/openvino/openvino_net.cpp b/csrc/net/openvino/openvino_net.cpp index af20899a6..1176967c9 100644 --- a/csrc/net/openvino/openvino_net.cpp +++ b/csrc/net/openvino/openvino_net.cpp @@ -3,17 +3,11 @@ #include -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = 
std::experimental::filesystem; -#endif #include #include "core/logger.h" #include "core/model.h" +#include "core/utils/filesystem.h" #include "core/utils/formatter.h" namespace mmdeploy { @@ -40,7 +34,7 @@ static Result ConvertElementType(InferenceEngine::Precision prec) { case InferenceEngine::Precision::ePrecision::I64: return DataType::kINT64; default: - ERROR("unsupported InferenceEngine Precision: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported InferenceEngine Precision: {}", static_cast(type)); return Status(eNotSupported); } } @@ -58,7 +52,7 @@ static Result ConvertPrecision(DataType case DataType::kINT64: return InferenceEngine::Precision::ePrecision::I64; default: - ERROR("unsupported DataType: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported DataType: {}", static_cast(type)); return Status(eNotSupported); } } @@ -99,7 +93,7 @@ Result OpenVINONet::Init(const Value& args) { bin_out << raw_bin; bin_out.close(); } catch (const std::exception& e) { - ERROR("unhandled exception when creating tmp xml/bin: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating tmp xml/bin: {}", e.what()); return Status(eFail); } @@ -116,8 +110,7 @@ Result OpenVINONet::Init(const Value& args) { OUTCOME_TRY(auto data_type, ConvertElementType(input_data->getPrecision())); const auto& size_vector = input_data->getTensorDesc().getDims(); TensorShape shape{size_vector.begin(), size_vector.end()}; - input_tensors_.emplace_back(TensorDesc{ - .device = device_, .data_type = data_type, .shape = shape, .name = input_name}); + input_tensors_.emplace_back(TensorDesc{device_, data_type, shape, input_name}); } // set output tensor @@ -128,8 +121,7 @@ Result OpenVINONet::Init(const Value& args) { OUTCOME_TRY(auto data_type, ConvertElementType(output_data->getPrecision())); const auto& size_vector = output_data->getDims(); TensorShape shape{size_vector.begin(), size_vector.end()}; - output_tensors_.emplace_back(TensorDesc{ - .device = device_, .data_type = data_type, 
.shape = shape, .name = output_name}); + output_tensors_.emplace_back(TensorDesc{device_, data_type, shape, output_name}); } // create request @@ -141,7 +133,7 @@ Result OpenVINONet::Init(const Value& args) { request_ = executable_network.CreateInferRequest(); } catch (const std::exception& e) { - ERROR("unhandled exception when creating OpenVINO: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating OpenVINO: {}", e.what()); return Status(eFail); } return success(); @@ -190,7 +182,7 @@ static Result SetBlob(InferenceEngine::InferRequest& request, Tensor& tens InferenceEngine::make_shared_blob(ie_desc, tensor.data())); break; default: - ERROR("unsupported DataType: {}", static_cast(desc.data_type)); + MMDEPLOY_ERROR("unsupported DataType: {}", static_cast(desc.data_type)); return Status(eNotSupported); } return success(); @@ -211,9 +203,7 @@ static Result GetBlob(InferenceEngine::InferRequest& request, Tensor& tens auto moutputHolder = moutput->rmap(); std::shared_ptr data(const_cast(moutputHolder.as()), [](void*) {}); - Tensor blob_tensor = { - TensorDesc{.device = device, .data_type = data_type, .shape = shape, .name = output_name}, - data}; + Tensor blob_tensor = {TensorDesc{device, data_type, shape, output_name}, data}; if (!std::equal(blob_tensor.shape().begin(), blob_tensor.shape().end(), tensor.shape().begin())) tensor.Reshape(shape); OUTCOME_TRY(tensor.CopyFrom(blob_tensor, stream)); @@ -272,11 +262,11 @@ class OpenVINONetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating OpenVINONet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating OpenVINONet: {}", r.error().message().c_str()); return nullptr; } } catch (const std::exception& e) { - ERROR("unhandled exception when creating OpenVINONet: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating OpenVINONet: {}", e.what()); return nullptr; } } diff --git a/csrc/net/ort/CMakeLists.txt b/csrc/net/ort/CMakeLists.txt 
index 4b7af7aa5..b4b78eff4 100644 --- a/csrc/net/ort/CMakeLists.txt +++ b/csrc/net/ort/CMakeLists.txt @@ -3,18 +3,12 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ort_net) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) - add_library(${PROJECT_NAME} SHARED ort_net.cpp) - target_include_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/include) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + mmdeploy_add_module(${PROJECT_NAME} ort_net.cpp) + target_include_directories(${PROJECT_NAME} PRIVATE ${ONNXRUNTIME_DIR}/include) target_link_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/lib) - target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core - PUBLIC onnxruntime) - target_link_libraries(${PROJECT_NAME} PRIVATE - -Wl,--whole-archive - mmdeploy::onnxruntime::ops::static - -Wl,--no-whole-archive) + target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_onnxruntime_ops_obj) add_library(mmdeploy::ort_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) else () message(ERROR "'ort_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") endif () diff --git a/csrc/net/ort/ort_net.cpp b/csrc/net/ort/ort_net.cpp index 5a2fb2e63..10ab9f6e1 100644 --- a/csrc/net/ort/ort_net.cpp +++ b/csrc/net/ort/ort_net.cpp @@ -1,9 +1,13 @@ // Copyright (c) OpenMMLab. All rights reserved. 
+ #include "ort_net.h" +#include + #include "core/logger.h" #include "core/model.h" #include "core/utils/formatter.h" +#include "onnxruntime_register.h" namespace mmdeploy { @@ -25,7 +29,7 @@ static Result ConvertElementType(ONNXTensorElementDataType type) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return DataType::kINT64; default: - ERROR("unsupported ONNXTensorElementDataType: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported ONNXTensorElementDataType: {}", static_cast(type)); return Status(eNotSupported); } } @@ -45,6 +49,9 @@ Result OrtNet::Init(const Value& args) { Ort::SessionOptions options; options.SetLogSeverityLevel(3); + + RegisterCustomOps(options, OrtGetApiBase()); + if (device_.is_device()) { OrtCUDAProviderOptions cuda_options{}; cuda_options.device_id = device_.device_id(); @@ -69,12 +76,11 @@ Result OrtNet::Init(const Value& args) { auto input_name = session_.GetInputName(i, allocator); auto type_info = session_.GetInputTypeInfo(i); auto shape = to_shape(type_info); - INFO("input {}, shape = {}", i, shape); + MMDEPLOY_INFO("input {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); - input_tensors_.emplace_back( - TensorDesc{.device = device_, .data_type = data_type, .shape = shape, .name = input_name}); + input_tensors_.emplace_back(TensorDesc{device_, data_type, shape, input_name}); allocator.Free(input_name); } @@ -84,12 +90,11 @@ Result OrtNet::Init(const Value& args) { auto output_name = session_.GetOutputName(i, allocator); auto type_info = session_.GetOutputTypeInfo(i); auto shape = to_shape(type_info); - INFO("output {}, shape = {}", i, shape); + MMDEPLOY_INFO("output {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); - output_tensors_.emplace_back( - TensorDesc{.device = device_, .data_type = data_type, .shape = shape, .name = 
output_name}); + output_tensors_.emplace_back(TensorDesc{device_, data_type, shape, output_name}); allocator.Free(output_name); } @@ -166,7 +171,7 @@ Result OrtNet::Forward() { OUTCOME_TRY(stream_.Wait()); } catch (const std::exception& e) { - ERROR(e.what()); + MMDEPLOY_ERROR(e.what()); return Status(eFail); } return success(); @@ -182,11 +187,11 @@ class OrtNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating OrtNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating OrtNet: {}", r.error().message().c_str()); return nullptr; } } catch (const std::exception& e) { - ERROR("unhandled exception when creating ORTNet: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating ORTNet: {}", e.what()); return nullptr; } } diff --git a/csrc/net/ppl/CMakeLists.txt b/csrc/net/ppl/CMakeLists.txt index cb6c0fb31..dd859f8e2 100644 --- a/csrc/net/ppl/CMakeLists.txt +++ b/csrc/net/ppl/CMakeLists.txt @@ -2,10 +2,10 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_pplnn_net) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) find_package(pplnn REQUIRED) -add_library(${PROJECT_NAME} SHARED ppl_net.cpp) +mmdeploy_add_module(${PROJECT_NAME} ppl_net.cpp) target_include_directories(${PROJECT_NAME} PUBLIC $) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) @@ -17,7 +17,5 @@ if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64) endif () target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core ${PPLNN_LIBRARIES} - PUBLIC nvrtc) + PRIVATE ${PPLNN_LIBRARIES} nvrtc) add_library(mmdeploy::pplnn_net ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/ppl/ppl_net.cpp b/csrc/net/ppl/ppl_net.cpp index 72e8f99ac..f0d4b1603 100644 --- a/csrc/net/ppl/ppl_net.cpp +++ b/csrc/net/ppl/ppl_net.cpp @@ -22,7 +22,7 @@ Result ppl_try(int code) { if (code == 0) { return success(); } - 
ERROR("ppl error: {}", ppl::common::GetRetCodeStr(code)); + MMDEPLOY_ERROR("ppl error: {}", ppl::common::GetRetCodeStr(code)); return Status(eFail); } @@ -86,9 +86,9 @@ Result PPLNet::Init(const Value& args) { /// debug only auto& desc = inputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); - DEBUG("input {}: datatype = {}, dataformat = {}, shape = {}", i, - ppl::common::GetDataTypeStr(desc.GetDataType()), - ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); + MMDEPLOY_DEBUG("input {}: datatype = {}, dataformat = {}, shape = {}", i, + ppl::common::GetDataTypeStr(desc.GetDataType()), + ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); } for (int i = 0; i < runtime->GetOutputCount(); ++i) { @@ -98,9 +98,9 @@ Result PPLNet::Init(const Value& args) { auto desc = outputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); - DEBUG("output {}: datatype = {}, dataformat = {}, shape = {}", i, - ppl::common::GetDataTypeStr(desc.GetDataType()), - ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); + MMDEPLOY_DEBUG("output {}: datatype = {}, dataformat = {}, shape = {}", i, + ppl::common::GetDataTypeStr(desc.GetDataType()), + ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); TensorShape shape(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); } @@ -176,8 +176,8 @@ Result PPLNet::Forward() { auto& internal = *outputs_internal_[i]; auto format = internal.GetShape().GetDataFormat(); if (format != ppl::common::DATAFORMAT_NDARRAY) { - ERROR("output {}'s format is {}, only NDARRAY is currently supported", i, - ppl::common::GetDataFormatStr(format)); + MMDEPLOY_ERROR("output {}'s format is {}, only NDARRAY is currently supported", i, + ppl::common::GetDataFormatStr(format)); return Status(eNotSupported); } auto& external = outputs_external_[i]; @@ -200,7 +200,8 @@ Result PPLNet::Forward() { if (external.size() > 0) { 
OUTCOME_TRY(Tensor(external.desc(), data).CopyTo(external, stream_)); } else { - WARN("copy skipped due to zero sized tensor: {} {}", external.name(), external.shape()); + MMDEPLOY_WARN("copy skipped due to zero sized tensor: {} {}", external.name(), + external.shape()); } } } @@ -235,7 +236,7 @@ Result PPLNet::Reshape(Span input_shapes) { if (can_infer_output_shapes_) { OUTCOME_TRY(auto output_shapes, InferOutputShapes(input_shapes, prev_in_shapes, prev_out_shapes)); - // ERROR("inferred output shapes: {}", output_shapes); + // MMDEPLOY_ERROR("inferred output shapes: {}", output_shapes); for (int i = 0; i < outputs_external_.size(); ++i) { auto& output = outputs_external_[i]; output.Reshape(output_shapes[i]); @@ -304,7 +305,7 @@ class PPLNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating PPLNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating PPLNet: {}", r.error().message().c_str()); return nullptr; } } diff --git a/csrc/net/trt/CMakeLists.txt b/csrc/net/trt/CMakeLists.txt index 1368e9335..94f08070b 100644 --- a/csrc/net/trt/CMakeLists.txt +++ b/csrc/net/trt/CMakeLists.txt @@ -2,24 +2,16 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_trt_net) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) include(${CMAKE_SOURCE_DIR}/cmake/tensorrt.cmake) -add_library(${PROJECT_NAME} SHARED trt_net.cpp) +mmdeploy_add_module(${PROJECT_NAME} trt_net.cpp) target_include_directories(${PROJECT_NAME} PRIVATE ${TENSORRT_INCLUDE_DIR}) target_include_directories(${PROJECT_NAME} PRIVATE ${CUDNN_DIR}/include) target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64) +target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64) +target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_tensorrt_ops_obj) target_link_libraries(${PROJECT_NAME} 
PUBLIC ${TENSORRT_LIBRARY} cudnn) -target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core - ) -target_link_libraries(${PROJECT_NAME} - PRIVATE -Wl,--whole-archive - mmdeploy::tensorrt_ops::static - -Wl,--no-whole-archive - ) add_library(mmdeploy::trt_net ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/trt/trt_net.cpp b/csrc/net/trt/trt_net.cpp index 6f4cb940a..9300aad10 100644 --- a/csrc/net/trt/trt_net.cpp +++ b/csrc/net/trt/trt_net.cpp @@ -18,14 +18,14 @@ class TRTLogger : public nvinfer1::ILogger { void log(Severity severity, const char* msg) noexcept override { switch (severity) { case Severity::kINFO: - // INFO("TRTNet: {}", msg); + // MMDEPLOY_INFO("TRTNet: {}", msg); break; case Severity::kWARNING: - WARN("TRTNet: {}", msg); + MMDEPLOY_WARN("TRTNet: {}", msg); break; case Severity::kERROR: case Severity::kINTERNAL_ERROR: - ERROR("TRTNet: {}", msg); + MMDEPLOY_ERROR("TRTNet: {}", msg); break; default: break; @@ -72,7 +72,7 @@ static inline Result trt_try(bool code, const char* msg = nullptr, Status return success(); } if (msg) { - ERROR("{}", msg); + MMDEPLOY_ERROR("{}", msg); } return e; } @@ -102,7 +102,7 @@ Result TRTNet::Init(const Value& args) { auto& context = args["context"]; device_ = context["device"].get(); if (device_.is_host()) { - ERROR("TRTNet: device must be a GPU!"); + MMDEPLOY_ERROR("TRTNet: device must be a GPU!"); return Status(eNotSupported); } stream_ = context["stream"].get(); @@ -129,19 +129,18 @@ Result TRTNet::Init(const Value& args) { auto binding_name = engine_->getBindingName(i); auto dims = engine_->getBindingDimensions(i); if (engine_->isShapeBinding(i)) { - ERROR("shape binding is not supported."); + MMDEPLOY_ERROR("shape binding is not supported."); return Status(eNotSupported); } OUTCOME_TRY(auto dtype, MapDataType(engine_->getBindingDataType(i))); - TensorDesc desc{ - .device = device_, .data_type = dtype, .shape = to_shape(dims), .name = binding_name}; + TensorDesc desc{device_, dtype, 
to_shape(dims), binding_name}; if (engine_->bindingIsInput(i)) { - DEBUG("input binding {} {} {}", i, binding_name, to_string(dims)); + MMDEPLOY_DEBUG("input binding {} {} {}", i, binding_name, to_string(dims)); input_ids_.push_back(i); input_names_.emplace_back(binding_name); input_tensors_.emplace_back(desc, Buffer()); } else { - DEBUG("output binding {} {} {}", i, binding_name, to_string(dims)); + MMDEPLOY_DEBUG("output binding {} {} {}", i, binding_name, to_string(dims)); output_ids_.push_back(i); output_names_.emplace_back(binding_name); output_tensors_.emplace_back(desc, Buffer()); @@ -169,17 +168,17 @@ Result TRTNet::Reshape(Span input_shapes) { } for (int i = 0; i < input_tensors_.size(); ++i) { auto dims = to_dims(input_shapes[i]); - // ERROR("input shape: {}", to_string(dims)); + // MMDEPLOY_ERROR("input shape: {}", to_string(dims)); TRT_TRY(context_->setBindingDimensions(input_ids_[i], dims)); input_tensors_[i].Reshape(input_shapes[i]); } if (!context_->allInputDimensionsSpecified()) { - ERROR("not all input dimensions specified"); + MMDEPLOY_ERROR("not all input dimensions specified"); return Status(eFail); } for (int i = 0; i < output_tensors_.size(); ++i) { auto dims = context_->getBindingDimensions(output_ids_[i]); - // ERROR("output shape: {}", to_string(dims)); + // MMDEPLOY_ERROR("output shape: {}", to_string(dims)); output_tensors_[i].Reshape(to_shape(dims)); } return success(); diff --git a/csrc/preprocess/CMakeLists.txt b/csrc/preprocess/CMakeLists.txt index ab1084198..503ead8f8 100644 --- a/csrc/preprocess/CMakeLists.txt +++ b/csrc/preprocess/CMakeLists.txt @@ -8,8 +8,7 @@ if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) add_subdirectory(cuda) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -build_target(${PROJECT_NAME} transform_module.cpp) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) +mmdeploy_add_module(${PROJECT_NAME} transform_module.cpp) 
+target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::transform) add_library(mmdeploy::transform_module ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/CMakeLists.txt b/csrc/preprocess/cpu/CMakeLists.txt index 00c87cd7e..d2a75b10e 100644 --- a/csrc/preprocess/cpu/CMakeLists.txt +++ b/csrc/preprocess/cpu/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_cpu_transform_impl) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS collect_impl.cpp @@ -11,13 +11,10 @@ set(SRCS image2tensor_impl.cpp load_impl.cpp normalize_impl.cpp - opencv_utils.cpp - opencv_utils.h pad_impl.cpp resize_impl.cpp) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_link_libraries(${PROJECT_NAME} - PUBLIC opencv_imgproc opencv_core - PRIVATE mmdeploy::core) + PRIVATE mmdeploy::transform + mmdeploy_opencv_utils) add_library(mmdeploy::transform_impl::cpu ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/pad_impl.cpp b/csrc/preprocess/cpu/pad_impl.cpp index 3a139d70a..c75ba4139 100644 --- a/csrc/preprocess/cpu/pad_impl.cpp +++ b/csrc/preprocess/cpu/pad_impl.cpp @@ -17,7 +17,7 @@ class PadImpl : public ::mmdeploy::PadImpl { {"reflect", cv::BORDER_REFLECT_101}, {"symmetric", cv::BORDER_REFLECT}}; if (border_map.find(arg_.padding_mode) == border_map.end()) { - ERROR("unsupported padding_mode '{}'", arg_.padding_mode); + MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode); throw std::invalid_argument("unsupported padding_mode"); } border_type_ = border_map[arg_.padding_mode]; diff --git a/csrc/preprocess/cuda/CMakeLists.txt b/csrc/preprocess/cuda/CMakeLists.txt index ac4def77d..76caeb214 100644 --- a/csrc/preprocess/cuda/CMakeLists.txt +++ b/csrc/preprocess/cuda/CMakeLists.txt @@ -2,29 +2,29 @@ cmake_minimum_required(VERSION 
3.14) project(mmdeploy_cuda_transform_impl CUDA CXX) -if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.18.0") - # suppress 'CMAKE_CUDA_ARCHITECTURES' warning - cmake_policy(SET CMP0104 OLD) -endif() +if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.18.0") + # suppress 'CMAKE_CUDA_ARCHITECTURES' warning + cmake_policy(SET CMP0104 OLD) +endif () find_package(pplcv REQUIRED) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS - crop_impl.cpp - image2tensor_impl.cpp - load_impl.cpp - normalize_impl.cpp - pad_impl.cpp - resize_impl.cpp - cast.cu - crop.cu - normalize.cu - transpose.cu) -build_target(${PROJECT_NAME} "${SRCS}") + crop_impl.cpp + image2tensor_impl.cpp + load_impl.cpp + normalize_impl.cpp + pad_impl.cpp + resize_impl.cpp + cast.cu + crop.cu + normalize.cu + transpose.cu) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE + mmdeploy::transform ${PPLCV_LIBRARIES}) target_include_directories(${PROJECT_NAME} - PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_link_libraries(${PROJECT_NAME} PRIVATE ${PPLCV_LIBRARIES} mmdeploy::core) + PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS}) add_library(mmdeploy::transform_impl::cuda ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cuda/crop_impl.cpp b/csrc/preprocess/cuda/crop_impl.cpp index 0808e8c73..eb6f64f83 100644 --- a/csrc/preprocess/cuda/crop_impl.cpp +++ b/csrc/preprocess/cuda/crop_impl.cpp @@ -43,7 +43,7 @@ class CenterCropImpl : public ::mmdeploy::CenterCropImpl { } else if (1 == c) { Crop(input, desc.shape[2], output, h, w, top, left, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else if (DataType::kFLOAT == type) { @@ -54,11 +54,11 @@ class CenterCropImpl : public ::mmdeploy::CenterCropImpl { } else if (1 == c) { Crop(input, desc.shape[2], output, h, w, top, left, stream); } 
else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } return dst_tensor; diff --git a/csrc/preprocess/cuda/load_impl.cpp b/csrc/preprocess/cuda/load_impl.cpp index 2d8df26b8..e7ffe506d 100644 --- a/csrc/preprocess/cuda/load_impl.cpp +++ b/csrc/preprocess/cuda/load_impl.cpp @@ -70,11 +70,11 @@ class PrepareImageImpl : public ::mmdeploy::PrepareImageImpl { BGRA2BGR(stream, src_h, src_w, src_stride, src_ptr, dst_stride, dst_ptr); break; default: - ERROR("src type: unknown type {}", img.pixel_format()); + MMDEPLOY_ERROR("src type: unknown type {}", img.pixel_format()); return Status(eNotSupported); } if (ret != 0) { - ERROR("color transfer from {} to BGR failed, ret {}", img.pixel_format(), ret); + MMDEPLOY_ERROR("color transfer from {} to BGR failed, ret {}", img.pixel_format(), ret); return Status(eFail); } if (arg_.to_float32) { @@ -140,11 +140,11 @@ class PrepareImageImpl : public ::mmdeploy::PrepareImageImpl { BGRA2GRAY(stream, src_h, src_w, src_stride, src_ptr, dst_stride, dst_ptr); break; default: - ERROR("src type: unknown type {}", img.pixel_format()); + MMDEPLOY_ERROR("src type: unknown type {}", img.pixel_format()); throw Status(eNotSupported); } if (ret != 0) { - ERROR("color transfer from {} to Gray failed", img.pixel_format()); + MMDEPLOY_ERROR("color transfer from {} to Gray failed", img.pixel_format()); throw Status(eFail); } if (arg_.to_float32) { diff --git a/csrc/preprocess/cuda/normalize.cu b/csrc/preprocess/cuda/normalize.cu index 696abcc7d..9536ecd05 100644 --- a/csrc/preprocess/cuda/normalize.cu +++ b/csrc/preprocess/cuda/normalize.cu @@ -1,5 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
+#include + #include #include @@ -12,7 +14,7 @@ __global__ void normalize(const T* src, int height, int width, int stride, float int x = (int)(blockIdx.x * blockDim.x + threadIdx.x); int y = (int)(blockIdx.y * blockDim.y + threadIdx.y); - if (x >= width or y >= height) { + if (x >= width || y >= height) { return; } diff --git a/csrc/preprocess/cuda/normalize_impl.cpp b/csrc/preprocess/cuda/normalize_impl.cpp index 639f31aa9..48e664799 100644 --- a/csrc/preprocess/cuda/normalize_impl.cpp +++ b/csrc/preprocess/cuda/normalize_impl.cpp @@ -41,7 +41,7 @@ class NormalizeImpl : public ::mmdeploy::NormalizeImpl { Normalize(input, h, w, stride, output, arg_.mean.data(), arg_.std.data(), arg_.to_rgb, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else if (DataType::kFLOAT == src_desc.data_type) { @@ -53,11 +53,11 @@ class NormalizeImpl : public ::mmdeploy::NormalizeImpl { Normalize(input, h, w, stride, output, arg_.mean.data(), arg_.std.data(), arg_.to_rgb, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else { - ERROR("unsupported data type {}", src_desc.data_type); + MMDEPLOY_ERROR("unsupported data type {}", src_desc.data_type); assert(0); return Status(eNotSupported); } diff --git a/csrc/preprocess/cuda/pad_impl.cpp b/csrc/preprocess/cuda/pad_impl.cpp index ae567cb09..77781c048 100644 --- a/csrc/preprocess/cuda/pad_impl.cpp +++ b/csrc/preprocess/cuda/pad_impl.cpp @@ -14,12 +14,20 @@ namespace cuda { class PadImpl : public ::mmdeploy::PadImpl { public: explicit PadImpl(const Value& args) : ::mmdeploy::PadImpl(args) { +#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 + map border_map{{"constant", ppl::cv::BORDER_CONSTANT}, + {"edge", ppl::cv::BORDER_REPLICATE}, + {"reflect", ppl::cv::BORDER_REFLECT_101}, + { "symmetric", + ppl::cv::BORDER_REFLECT }}; +#else 
map border_map{{"constant", ppl::cv::BORDER_TYPE_CONSTANT}, {"edge", ppl::cv::BORDER_TYPE_REPLICATE}, {"reflect", ppl::cv::BORDER_TYPE_REFLECT_101}, {"symmetric", ppl::cv::BORDER_TYPE_REFLECT}}; +#endif if (border_map.find(arg_.padding_mode) == border_map.end()) { - ERROR("unsupported padding_mode '{}'", arg_.padding_mode); + MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode); throw_exception(eNotSupported); } padding_mode_ = border_map[arg_.padding_mode]; @@ -55,7 +63,7 @@ class PadImpl : public ::mmdeploy::PadImpl { dst_buffer, padding[1], padding[3], padding[0], padding[2], padding_mode_, arg_.pad_val); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); assert(0); return Status(eNotSupported); } @@ -71,17 +79,17 @@ class PadImpl : public ::mmdeploy::PadImpl { stream, height, width, width * c, src_buffer, dst_width * c, dst_buffer, padding[1], padding[3], padding[0], padding[2], padding_mode_, (ppl::cv::uchar)arg_.pad_val); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); assert(0); return Status(eNotSupported); } } else { - ERROR("unsupported data type {}", desc.data_type); + MMDEPLOY_ERROR("unsupported data type {}", desc.data_type); assert(0); return Status(eNotSupported); } if (ret != 0) { - ERROR("unexpected exception happened"); + MMDEPLOY_ERROR("unexpected exception happened"); assert(0); return Status(eNotSupported); } diff --git a/csrc/preprocess/cuda/resize_impl.cpp b/csrc/preprocess/cuda/resize_impl.cpp index ce0a891c3..8a3766480 100644 --- a/csrc/preprocess/cuda/resize_impl.cpp +++ b/csrc/preprocess/cuda/resize_impl.cpp @@ -14,7 +14,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { public: explicit ResizeImpl(const Value& args) : ::mmdeploy::ResizeImpl(args) { if (arg_.interpolation != "bilinear" && arg_.interpolation != "nearest") { - ERROR("{} interpolation is not supported", arg_.interpolation); + MMDEPLOY_ERROR("{} interpolation 
is not supported", arg_.interpolation); throw_exception(eNotSupported); } } @@ -33,7 +33,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { } else if (tensor.data_type() == DataType::kFLOAT) { OUTCOME_TRY(ResizeDispatch(src_tensor, dst_tensor, stream)); } else { - ERROR("unsupported data type {}", tensor.data_type()); + MMDEPLOY_ERROR("unsupported data type {}", tensor.data_type()); return Status(eNotSupported); } return dst_tensor; @@ -42,23 +42,23 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { private: template ppl::common::RetCode DispatchImpl(Args&&... args) { -#ifdef PPLCV_VERSION_MAJOR +#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 if (arg_.interpolation == "bilinear") { return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_LINEAR); + ppl::cv::INTERPOLATION_LINEAR); } if (arg_.interpolation == "nearest") { return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_NEAREST_POINT); + ppl::cv::INTERPOLATION_NEAREST_POINT); } - #else -#warning "support for ppl.cv < 0.6 is deprecated and will be dropped in the future" if (arg_.interpolation == "bilinear") { - return ppl::cv::cuda::ResizeLinear(std::forward(args)...); + return ppl::cv::cuda::Resize(std::forward(args)..., + ppl::cv::INTERPOLATION_TYPE_LINEAR); } if (arg_.interpolation == "nearest") { - return ppl::cv::cuda::ResizeNearestPoint(std::forward(args)...); + return ppl::cv::cuda::Resize(std::forward(args)..., + ppl::cv::INTERPOLATION_TYPE_NEAREST_POINT); } #endif return ppl::common::RC_UNSUPPORTED; @@ -82,7 +82,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { } else if (4 == c) { ret = DispatchImpl(stream, h, w, w * c, input, dst_h, dst_w, dst_w * c, output); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } return ret == 0 ? 
success() : Result(Status(eFail)); diff --git a/csrc/preprocess/transform/CMakeLists.txt b/csrc/preprocess/transform/CMakeLists.txt index e9a9c1402..8e13a67ae 100644 --- a/csrc/preprocess/transform/CMakeLists.txt +++ b/csrc/preprocess/transform/CMakeLists.txt @@ -2,21 +2,19 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_transform) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS - collect.cpp - compose.cpp - crop.cpp - image2tensor.cpp - load.cpp - normalize.cpp - pad.cpp - resize.cpp - transform.cpp) -build_target(${PROJECT_NAME} "${SRCS}") + collect.cpp + compose.cpp + crop.cpp + image2tensor.cpp + load.cpp + normalize.cpp + pad.cpp + resize.cpp + transform.cpp) +mmdeploy_add_module(${PROJECT_NAME} LIBRARY "${SRCS}") target_include_directories( - ${PROJECT_NAME} PUBLIC $) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) + ${PROJECT_NAME} PUBLIC $) add_library(mmdeploy::transform ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/transform/collect.cpp b/csrc/preprocess/transform/collect.cpp index 673514c78..d01d1cf4b 100644 --- a/csrc/preprocess/transform/collect.cpp +++ b/csrc/preprocess/transform/collect.cpp @@ -26,7 +26,7 @@ CollectImpl::CollectImpl(const Value &args) { } Result CollectImpl::Process(const Value &input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output; // collect 'ori_img' and 'attribute' from `input`, because those two fields @@ -45,7 +45,7 @@ Result CollectImpl::Process(const Value &input) { } for (auto &key : arg_.keys) { if (!input.contains(key)) { - ERROR("missed key '{}' in input", key); + MMDEPLOY_ERROR("missed key '{}' in input", key); // return Status(eInvalidArgument); return Status(eInvalidArgument); } else { @@ -53,7 +53,7 @@ Result CollectImpl::Process(const Value &input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: 
{}", to_json(output).dump(2)); return output; } @@ -77,4 +77,6 @@ class CollectCreator : public Creator { REGISTER_MODULE(Transform, CollectCreator); +MMDEPLOY_DEFINE_REGISTRY(CollectImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/collect.h b/csrc/preprocess/transform/collect.h index 92439120f..327c5191e 100644 --- a/csrc/preprocess/transform/collect.h +++ b/csrc/preprocess/transform/collect.h @@ -6,7 +6,7 @@ #include "transform.h" namespace mmdeploy { -class CollectImpl : public Module { +class MMDEPLOY_API CollectImpl : public Module { public: explicit CollectImpl(const Value& args); ~CollectImpl() = default; @@ -24,7 +24,7 @@ class CollectImpl : public Module { ArgType arg_; }; -class Collect : public Transform { +class MMDEPLOY_API Collect : public Transform { public: explicit Collect(const Value& args, int version = 0); ~Collect() = default; @@ -35,6 +35,8 @@ class Collect : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(CollectImpl); + } // namespace mmdeploy #endif // MMDEPLOY_COLLECT_H diff --git a/csrc/preprocess/transform/compose.cpp b/csrc/preprocess/transform/compose.cpp index 57417eb48..a52b6848a 100644 --- a/csrc/preprocess/transform/compose.cpp +++ b/csrc/preprocess/transform/compose.cpp @@ -17,15 +17,15 @@ Compose::Compose(const Value& args, int version) : Transform(args) { for (auto cfg : args["transforms"]) { cfg["context"] = context; auto type = cfg.value("type", std::string{}); - DEBUG("creating transform: {} with cfg: {}", type, mmdeploy::to_json(cfg).dump(2)); + MMDEPLOY_DEBUG("creating transform: {} with cfg: {}", type, mmdeploy::to_json(cfg).dump(2)); auto creator = Registry::Get().GetCreator(type, version); if (!creator) { - ERROR("unable to find creator: {}", type); + MMDEPLOY_ERROR("unable to find creator: {}", type); throw std::invalid_argument("unable to find creator"); } auto transform = creator->Create(cfg); if (!transform) { - ERROR("failed to create transform: {}", type); + 
MMDEPLOY_ERROR("failed to create transform: {}", type); throw std::invalid_argument("failed to create transform"); } transforms_.push_back(std::move(transform)); diff --git a/csrc/preprocess/transform/compose.h b/csrc/preprocess/transform/compose.h index 3472d3e20..41f170371 100644 --- a/csrc/preprocess/transform/compose.h +++ b/csrc/preprocess/transform/compose.h @@ -7,7 +7,7 @@ namespace mmdeploy { -class Compose : public Transform { +class MMDEPLOY_API Compose : public Transform { public: explicit Compose(const Value& args, int version = 0); ~Compose() override = default; diff --git a/csrc/preprocess/transform/crop.cpp b/csrc/preprocess/transform/crop.cpp index d2b9977dc..1ea8867ca 100644 --- a/csrc/preprocess/transform/crop.cpp +++ b/csrc/preprocess/transform/crop.cpp @@ -24,7 +24,7 @@ CenterCropImpl::CenterCropImpl(const Value& args) : TransformImpl(args) { } Result CenterCropImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); auto img_fields = GetImageFields(input); // copy input data, and update its properties @@ -63,14 +63,14 @@ Result CenterCropImpl::Process(const Value& input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } CenterCrop::CenterCrop(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'CenterCrop' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'CenterCrop' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Resize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -87,4 +87,5 @@ class CenterCropCreator : public Creator { }; REGISTER_MODULE(Transform, CenterCropCreator); +MMDEPLOY_DEFINE_REGISTRY(CenterCropImpl); } // namespace mmdeploy diff --git 
a/csrc/preprocess/transform/crop.h b/csrc/preprocess/transform/crop.h index 46bd50737..76c567271 100644 --- a/csrc/preprocess/transform/crop.h +++ b/csrc/preprocess/transform/crop.h @@ -3,12 +3,14 @@ #ifndef MMDEPLOY_CROP_H #define MMDEPLOY_CROP_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class CenterCropImpl : public TransformImpl { +class MMDEPLOY_API CenterCropImpl : public TransformImpl { public: explicit CenterCropImpl(const Value& args); ~CenterCropImpl() = default; @@ -29,7 +31,7 @@ class CenterCropImpl : public TransformImpl { ArgType arg_; }; -class CenterCrop : public Transform { +class MMDEPLOY_API CenterCrop : public Transform { public: explicit CenterCrop(const Value& args, int version = 0); ~CenterCrop() = default; @@ -40,6 +42,8 @@ class CenterCrop : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(CenterCropImpl); + } // namespace mmdeploy #endif // MMDEPLOY_CROP_H diff --git a/csrc/preprocess/transform/image2tensor.cpp b/csrc/preprocess/transform/image2tensor.cpp index 2adf95990..e2ccd3bb5 100644 --- a/csrc/preprocess/transform/image2tensor.cpp +++ b/csrc/preprocess/transform/image2tensor.cpp @@ -16,7 +16,7 @@ ImageToTensorImpl::ImageToTensorImpl(const Value& args) : TransformImpl(args) { } Result ImageToTensorImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; for (auto& key : arg_.keys) { assert(input.contains(key)); @@ -28,14 +28,14 @@ Result ImageToTensorImpl::Process(const Value& input) { OUTCOME_TRY(output[key], HWC2CHW(src_tensor)); } // for key - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } ImageToTensor::ImageToTensor(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - 
ERROR("'ImageToTensor' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'ImageToTensor' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'ImageToTensor' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -53,4 +53,5 @@ class ImageToTensorCreator : public Creator { int version_{1}; }; REGISTER_MODULE(Transform, ImageToTensorCreator); +MMDEPLOY_DEFINE_REGISTRY(ImageToTensorImpl); } // namespace mmdeploy diff --git a/csrc/preprocess/transform/image2tensor.h b/csrc/preprocess/transform/image2tensor.h index cca2c5db7..49eefd9f4 100644 --- a/csrc/preprocess/transform/image2tensor.h +++ b/csrc/preprocess/transform/image2tensor.h @@ -14,7 +14,7 @@ namespace mmdeploy { * it to (1, C, H, W). * */ -class ImageToTensorImpl : public TransformImpl { +class MMDEPLOY_API ImageToTensorImpl : public TransformImpl { public: ImageToTensorImpl(const Value& args); ~ImageToTensorImpl() = default; @@ -34,7 +34,7 @@ class ImageToTensorImpl : public TransformImpl { ArgType arg_; }; -class ImageToTensor : public Transform { +class MMDEPLOY_API ImageToTensor : public Transform { public: explicit ImageToTensor(const Value& args, int version = 0); ~ImageToTensor() = default; @@ -45,6 +45,8 @@ class ImageToTensor : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(ImageToTensorImpl); + } // namespace mmdeploy #endif // MMDEPLOY_IMAGE2TENSOR_H diff --git a/csrc/preprocess/transform/load.cpp b/csrc/preprocess/transform/load.cpp index 671948f2d..462c70a83 100644 --- a/csrc/preprocess/transform/load.cpp +++ b/csrc/preprocess/transform/load.cpp @@ -31,7 +31,7 @@ PrepareImageImpl::PrepareImageImpl(const Value& args) : TransformImpl(args) { */ Result PrepareImageImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); assert(input.contains("ori_img")); // copy input data, and update its properties later @@ 
-50,7 +50,7 @@ Result PrepareImageImpl::Process(const Value& input) { } output["ori_shape"] = {1, src_mat.height(), src_mat.width(), src_mat.channel()}; output["img_fields"].push_back("img"); - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } @@ -58,7 +58,7 @@ Result PrepareImageImpl::Process(const Value& input) { PrepareImage::PrepareImage(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'PrepareImage' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'PrepareImage' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'PrepareImage' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -80,4 +80,7 @@ class PrepareImageCreator : public Creator { }; REGISTER_MODULE(Transform, PrepareImageCreator); + +MMDEPLOY_DEFINE_REGISTRY(PrepareImageImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/load.h b/csrc/preprocess/transform/load.h index 32f0bdfdb..a05d4c136 100644 --- a/csrc/preprocess/transform/load.h +++ b/csrc/preprocess/transform/load.h @@ -8,7 +8,7 @@ #include "transform.h" namespace mmdeploy { -class PrepareImageImpl : public TransformImpl { +class MMDEPLOY_API PrepareImageImpl : public TransformImpl { public: explicit PrepareImageImpl(const Value& args); ~PrepareImageImpl() = default; @@ -29,7 +29,7 @@ class PrepareImageImpl : public TransformImpl { ArgType arg_; }; -class PrepareImage : public Transform { +class MMDEPLOY_API PrepareImage : public Transform { public: explicit PrepareImage(const Value& args, int version = 0); ~PrepareImage() = default; @@ -40,6 +40,8 @@ class PrepareImage : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(PrepareImageImpl); + } // namespace mmdeploy #endif // MMDEPLOY_LOAD_H diff --git 
a/csrc/preprocess/transform/normalize.cpp b/csrc/preprocess/transform/normalize.cpp index a0bc5f7ba..7fc9c2ad3 100644 --- a/csrc/preprocess/transform/normalize.cpp +++ b/csrc/preprocess/transform/normalize.cpp @@ -10,9 +10,11 @@ using namespace std; namespace mmdeploy { +// MMDEPLOY_DEFINE_REGISTRY(NormalizeImpl); + NormalizeImpl::NormalizeImpl(const Value& args) : TransformImpl(args) { - if (!args.contains("mean") or !args.contains("std")) { - ERROR("no 'mean' or 'std' is configured"); + if (!args.contains("mean") || !args.contains("std")) { + MMDEPLOY_ERROR("no 'mean' or 'std' is configured"); throw std::invalid_argument("no 'mean' or 'std' is configured"); } for (auto& v : args["mean"]) { @@ -50,7 +52,7 @@ NormalizeImpl::NormalizeImpl(const Value& args) : TransformImpl(args) { */ Result NormalizeImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); // copy input data, and update its properties later Value output = input; @@ -73,14 +75,14 @@ Result NormalizeImpl::Process(const Value& input) { } output["img_norm_cfg"]["to_rgb"] = arg_.to_rgb; } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Normalize::Normalize(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Normalize' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Normalize' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Normalize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -98,4 +100,6 @@ class NormalizeCreator : public Creator { REGISTER_MODULE(Transform, NormalizeCreator); +MMDEPLOY_DEFINE_REGISTRY(NormalizeImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/normalize.h b/csrc/preprocess/transform/normalize.h 
index 14a4edb43..fef8fd17c 100644 --- a/csrc/preprocess/transform/normalize.h +++ b/csrc/preprocess/transform/normalize.h @@ -8,7 +8,7 @@ namespace mmdeploy { -class NormalizeImpl : public TransformImpl { +class MMDEPLOY_API NormalizeImpl : public TransformImpl { public: explicit NormalizeImpl(const Value& args); ~NormalizeImpl() = default; @@ -28,7 +28,7 @@ class NormalizeImpl : public TransformImpl { ArgType arg_; }; -class Normalize : public Transform { +class MMDEPLOY_API Normalize : public Transform { public: explicit Normalize(const Value& args, int version = 0); ~Normalize() = default; @@ -39,5 +39,7 @@ class Normalize : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(NormalizeImpl); + } // namespace mmdeploy #endif // MMDEPLOY_NORMALIZE_H diff --git a/csrc/preprocess/transform/pad.cpp b/csrc/preprocess/transform/pad.cpp index 4d9c6c69a..9eb60748f 100644 --- a/csrc/preprocess/transform/pad.cpp +++ b/csrc/preprocess/transform/pad.cpp @@ -28,7 +28,7 @@ PadImpl::PadImpl(const Value& args) : TransformImpl(args) { } Result PadImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; auto img_fields = GetImageFields(input); @@ -38,7 +38,7 @@ Result PadImpl::Process(const Value& input) { assert(tensor.desc().shape.size() == 4); assert(tensor.desc().shape[0] == 1); - assert(tensor.desc().shape[3] == 3 or tensor.desc().shape[3] == 1); + assert(tensor.desc().shape[3] == 3 || tensor.desc().shape[3] == 1); int height = tensor.desc().shape[1]; int width = tensor.desc().shape[2]; @@ -75,14 +75,14 @@ Result PadImpl::Process(const Value& input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Pad::Pad(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Pad' 
is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Pad' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Pad' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -100,4 +100,6 @@ class PadCreator : public Creator { REGISTER_MODULE(Transform, PadCreator); +MMDEPLOY_DEFINE_REGISTRY(PadImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/pad.h b/csrc/preprocess/transform/pad.h index e684791a5..1b5ccbcd5 100644 --- a/csrc/preprocess/transform/pad.h +++ b/csrc/preprocess/transform/pad.h @@ -3,12 +3,14 @@ #ifndef MMDEPLOY_PAD_H #define MMDEPLOY_PAD_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class PadImpl : public TransformImpl { +class MMDEPLOY_API PadImpl : public TransformImpl { public: explicit PadImpl(const Value& args); ~PadImpl() override = default; @@ -33,7 +35,7 @@ class PadImpl : public TransformImpl { ArgType arg_; }; -class Pad : public Transform { +class MMDEPLOY_API Pad : public Transform { public: explicit Pad(const Value& args, int version = 0); ~Pad() override = default; @@ -43,6 +45,9 @@ class Pad : public Transform { protected: std::unique_ptr impl_; }; + +MMDEPLOY_DECLARE_REGISTRY(PadImpl); + } // namespace mmdeploy #endif // MMDEPLOY_PAD_H diff --git a/csrc/preprocess/transform/resize.cpp b/csrc/preprocess/transform/resize.cpp index 604103e6c..98398e3dc 100644 --- a/csrc/preprocess/transform/resize.cpp +++ b/csrc/preprocess/transform/resize.cpp @@ -19,14 +19,14 @@ ResizeImpl::ResizeImpl(const Value& args) : TransformImpl(args) { arg_.img_scale = {size, size}; } else if (args["size"].is_array()) { if (args["size"].size() != 2) { - ERROR("'size' expects an array of size 2, but got {}", args["size"].size()); + MMDEPLOY_ERROR("'size' expects an array of size 2, but got {}", args["size"].size()); throw std::length_error("'size' expects an array of size 2"); } auto height = args["size"][0].get(); auto width 
= args["size"][1].get(); arg_.img_scale = {height, width}; } else { - ERROR("'size' is expected to be an integer or and array of size 2"); + MMDEPLOY_ERROR("'size' is expected to be an integer or and array of size 2"); throw std::domain_error("'size' is expected to be an integer or and array of size 2"); } } @@ -35,13 +35,13 @@ ResizeImpl::ResizeImpl(const Value& args) : TransformImpl(args) { vector interpolations{"nearest", "bilinear", "bicubic", "area", "lanczos"}; if (std::find(interpolations.begin(), interpolations.end(), arg_.interpolation) == interpolations.end()) { - ERROR("'{}' interpolation is not supported", arg_.interpolation); + MMDEPLOY_ERROR("'{}' interpolation is not supported", arg_.interpolation); throw std::invalid_argument("unexpected interpolation"); } } Result ResizeImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; auto img_fields = GetImageFields(input); @@ -66,7 +66,7 @@ Result ResizeImpl::Process(const Value& input) { dst_h = int(h * scale_factor + 0.5); dst_w = int(w * scale_factor + 0.5); } else if (!arg_.img_scale.empty()) { - DEBUG( + MMDEPLOY_WARN( "neither 'scale' or 'scale_factor' is provided in input value. 
" "'img_scale' will be used"); if (-1 == arg_.img_scale[1]) { @@ -82,7 +82,7 @@ Result ResizeImpl::Process(const Value& input) { dst_w = arg_.img_scale[1]; } } else { - ERROR("no resize related parameter is provided"); + MMDEPLOY_ERROR("no resize related parameter is provided"); return Status(eInvalidArgument); } if (arg_.keep_ratio) { @@ -111,14 +111,14 @@ Result ResizeImpl::Process(const Value& input) { output[key] = dst_img; } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Resize::Resize(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Resize' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Resize' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Resize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -136,4 +136,6 @@ class ResizeCreator : public Creator { REGISTER_MODULE(Transform, ResizeCreator); +MMDEPLOY_DEFINE_REGISTRY(ResizeImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/resize.h b/csrc/preprocess/transform/resize.h index 1b9c10034..54947bee4 100644 --- a/csrc/preprocess/transform/resize.h +++ b/csrc/preprocess/transform/resize.h @@ -3,11 +3,13 @@ #ifndef MMDEPLOY_RESIZE_H #define MMDEPLOY_RESIZE_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class ResizeImpl : public TransformImpl { +class MMDEPLOY_API ResizeImpl : public TransformImpl { public: explicit ResizeImpl(const Value& args); ~ResizeImpl() override = default; @@ -29,7 +31,7 @@ class ResizeImpl : public TransformImpl { ArgType arg_; }; -class Resize : public Transform { +class MMDEPLOY_API Resize : public Transform { public: explicit Resize(const Value& args, int version = 0); ~Resize() override = default; @@ -40,5 +42,8 @@ class Resize : public 
Transform { std::unique_ptr impl_; static const std::string name_; }; + +MMDEPLOY_DECLARE_REGISTRY(ResizeImpl); + } // namespace mmdeploy #endif // MMDEPLOY_RESIZE_H diff --git a/csrc/preprocess/transform/transform.cpp b/csrc/preprocess/transform/transform.cpp index d57cfa393..f2be7519c 100644 --- a/csrc/preprocess/transform/transform.cpp +++ b/csrc/preprocess/transform/transform.cpp @@ -48,4 +48,6 @@ Transform::Transform(const Value &args) { } } +MMDEPLOY_DEFINE_REGISTRY(Transform); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/transform.h b/csrc/preprocess/transform/transform.h index fa1a700ea..ba96e91a1 100644 --- a/csrc/preprocess/transform/transform.h +++ b/csrc/preprocess/transform/transform.h @@ -9,7 +9,7 @@ namespace mmdeploy { -class TransformImpl : public Module { +class MMDEPLOY_API TransformImpl : public Module { public: TransformImpl() = default; explicit TransformImpl(const Value& args); @@ -23,41 +23,36 @@ class TransformImpl : public Module { Stream stream_; }; -class Transform : public Module { +class MMDEPLOY_API Transform : public Module { public: + ~Transform() override = default; + Transform() = default; explicit Transform(const Value& args); - ~Transform() override = default; + Transform(const Transform&) = delete; + Transform& operator=(const Transform&) = delete; const std::string& RuntimePlatform() const { return runtime_platform_; } protected: template - [[deprecated]] - /* - * We cannot LOG the error message, because WARN/INFO/ERROR causes - * redefinition when building UTs "catch2.hpp" used in UTs has the same LOG - * declaration - */ - std::unique_ptr - Instantiate(const char* transform_type, const Value& args, int version = 0) { + [[deprecated]] std::unique_ptr Instantiate(const char* transform_type, const Value& args, + int version = 0) { std::unique_ptr impl(nullptr); auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - // WARN("cannot find {} implementation 
on specific platform {} ", - // transform_type, specified_platform_); + MMDEPLOY_WARN("cannot find {} implementation on specific platform {} ", transform_type, + specified_platform_); for (auto& name : candidate_platforms_) { impl_creator = Registry::Get().GetCreator(name); if (impl_creator) { - // INFO("fallback {} implementation to platform {}", transform_type, - // name); + MMDEPLOY_INFO("fallback {} implementation to platform {}", transform_type, name); break; } } } if (nullptr == impl_creator) { - // ERROR("cannot find {} implementation on any registered platform ", - // transform_type); + MMDEPLOY_ERROR("cannot find {} implementation on any registered platform ", transform_type); return nullptr; } else { return impl_creator->Create(args); @@ -70,6 +65,8 @@ class Transform : public Module { std::vector candidate_platforms_; }; +MMDEPLOY_DECLARE_REGISTRY(Transform); + } // namespace mmdeploy #endif // MMDEPLOY_TRANSFORM_H diff --git a/csrc/preprocess/transform_module.cpp b/csrc/preprocess/transform_module.cpp index b76987821..9b7b2f01f 100644 --- a/csrc/preprocess/transform_module.cpp +++ b/csrc/preprocess/transform_module.cpp @@ -3,6 +3,7 @@ #include "transform_module.h" #include "archive/value_archive.h" +#include "core/module.h" #include "core/utils/formatter.h" #include "experimental/module_adapter.h" #include "preprocess/transform/transform.h" @@ -15,12 +16,12 @@ TransformModule::TransformModule(const Value& args) { const auto type = "Compose"; auto creator = Registry::Get().GetCreator(type, 1); if (!creator) { - ERROR("unable to find creator: {}", type); + MMDEPLOY_ERROR("unable to find creator: {}", type); throw_exception(eEntryNotFound); } auto cfg = args; if (cfg.contains("device")) { - WARN("force using device: {}", cfg["device"].get()); + MMDEPLOY_WARN("force using device: {}", cfg["device"].get()); auto device = Device(cfg["device"].get()); cfg["context"]["device"] = device; cfg["context"]["stream"] = Stream::GetDefault(device); @@ -31,7 +32,7 @@ 
TransformModule::TransformModule(const Value& args) { Result TransformModule::operator()(const Value& input) { auto output = transform_->Process(input); if (!output) { - ERROR("error: {}", output.error().message().c_str()); + MMDEPLOY_ERROR("error: {}", output.error().message().c_str()); } auto& ret = output.value(); if (ret.is_object()) { @@ -39,13 +40,13 @@ Result TransformModule::operator()(const Value& input) { } else if (ret.is_array() && ret.size() == 1 && ret[0].is_object()) { ret = ret[0]; } else { - ERROR("unsupported return value: {}", ret); + MMDEPLOY_ERROR("unsupported return value: {}", ret); return Status(eNotSupported); } return ret; } -class TransformModuleCreator : public Creator { +class MMDEPLOY_API TransformModuleCreator : public Creator { public: const char* GetName() const override { return "Transform"; } int GetVersion() const override { return 0; } diff --git a/csrc/utils/CMakeLists.txt b/csrc/utils/CMakeLists.txt new file mode 100644 index 000000000..1eef35940 --- /dev/null +++ b/csrc/utils/CMakeLists.txt @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +add_subdirectory(opencv) diff --git a/csrc/utils/opencv/CMakeLists.txt b/csrc/utils/opencv/CMakeLists.txt new file mode 100644 index 000000000..6eb8bd2e9 --- /dev/null +++ b/csrc/utils/opencv/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+cmake_minimum_required(VERSION 3.14) +project(mmdeploy_opencv_utils) + +include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + +mmdeploy_add_library(${PROJECT_NAME} opencv_utils.cpp) + +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy::core + PUBLIC ${OpenCV_LIBS}) + +target_include_directories(${PROJECT_NAME} + INTERFACE $) + +#export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/opencv_utils.cpp b/csrc/utils/opencv/opencv_utils.cpp similarity index 91% rename from csrc/preprocess/cpu/opencv_utils.cpp rename to csrc/utils/opencv/opencv_utils.cpp index d02d5571c..ef05dbf2e 100644 --- a/csrc/preprocess/cpu/opencv_utils.cpp +++ b/csrc/utils/opencv/opencv_utils.cpp @@ -42,7 +42,7 @@ cv::Mat Mat2CVMat(const Mat& mat) { {DataType::kINT32, CV_32S}}; auto type = CV_MAKETYPE(type_mapper[mat.type()], mat.channel()); auto format = mat.pixel_format(); - if (PixelFormat::kBGR == format or PixelFormat::kRGB == format) { + if (PixelFormat::kBGR == format || PixelFormat::kRGB == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); } else if (PixelFormat::kGRAYSCALE == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); @@ -59,7 +59,7 @@ cv::Mat Mat2CVMat(const Mat& mat) { } else if (PixelFormat::kBGRA == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); } else { - ERROR("unsupported mat format {}", format); + MMDEPLOY_ERROR("unsupported mat format {}", format); return {}; } } @@ -78,7 +78,7 @@ cv::Mat Tensor2CVMat(const Tensor& tensor) { return {h, w, CV_32SC(c), const_cast(tensor.data())}; } else { assert(0); - ERROR("unsupported type: {}", desc.data_type); + MMDEPLOY_ERROR("unsupported type: {}", desc.data_type); return {}; } } @@ -95,7 +95,7 @@ Tensor CVMat2Tensor(const cv::Mat& mat) { shape = {1, mat.rows, mat.cols, mat.channels()}; data_type = DataType::kINT32; } else { - ERROR("unsupported mat dat type {}", mat.type()); + MMDEPLOY_ERROR("unsupported 
mat dat type {}", mat.type()); assert(0); return {}; } @@ -118,7 +118,7 @@ cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, } else if (interpolation == "lanczos") { cv::resize(src, dst, dst.size(), 0, 0, cv::INTER_LANCZOS4); } else { - ERROR("{} interpolation is not supported", interpolation); + MMDEPLOY_ERROR("{} interpolation is not supported", interpolation); assert(0); } return dst; @@ -189,7 +189,7 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2BGR); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } @@ -214,7 +214,7 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2RGB); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } @@ -239,12 +239,12 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2GRAY); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } } else { - ERROR("unsupported target mat's element type {}", dst_format); + MMDEPLOY_ERROR("unsupported target mat's element type {}", dst_format); assert(0); return {}; } @@ -267,7 +267,7 @@ bool Compare(const cv::Mat& src1, const cv::Mat& src2) { cv::subtract(_src1, _src2, diff); diff = cv::abs(diff); auto sum = cv::sum(cv::sum(diff)); - DEBUG("sum: {}, average: {}", sum[0], sum[0] * 1.0 / (src1.rows * src1.cols)); + MMDEPLOY_DEBUG("sum: {}, average: {}", sum[0], sum[0] * 1.0 / (src1.rows * src1.cols)); return sum[0] / (src1.rows * src1.cols) < 0.5f; } diff --git a/csrc/preprocess/cpu/opencv_utils.h 
b/csrc/utils/opencv/opencv_utils.h similarity index 69% rename from csrc/preprocess/cpu/opencv_utils.h rename to csrc/utils/opencv/opencv_utils.h index 45aa360ff..05f8405eb 100644 --- a/csrc/preprocess/cpu/opencv_utils.h +++ b/csrc/utils/opencv/opencv_utils.h @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. -#ifndef MMDEPLOY_OPENCV_UTILS_H -#define MMDEPLOY_OPENCV_UTILS_H +#ifndef MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ +#define MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ #include "core/mat.h" #include "core/mpl/type_traits.h" @@ -12,11 +12,11 @@ namespace mmdeploy { namespace cpu { -cv::Mat Mat2CVMat(const Mat& mat); -cv::Mat Tensor2CVMat(const Tensor& tensor); +MMDEPLOY_API cv::Mat Mat2CVMat(const Mat& mat); +MMDEPLOY_API cv::Mat Tensor2CVMat(const Tensor& tensor); -Mat CVMat2Mat(const cv::Mat& mat, PixelFormat format); -Tensor CVMat2Tensor(const cv::Mat& mat); +MMDEPLOY_API Mat CVMat2Mat(const cv::Mat& mat, PixelFormat format); +MMDEPLOY_API Tensor CVMat2Tensor(const cv::Mat& mat); /** * @brief resize an image to specified size @@ -26,7 +26,8 @@ Tensor CVMat2Tensor(const cv::Mat& mat); * @param dst_width output image's width * @return output image if success, error code otherwise */ -cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, const std::string& interpolation); +MMDEPLOY_API cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, + const std::string& interpolation); /** * @brief crop an image @@ -38,7 +39,7 @@ cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, const std::str * @param right * @return cv::Mat */ -cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); +MMDEPLOY_API cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); /** * @brief Do normalization to an image @@ -50,8 +51,8 @@ cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); * @param inplace * @return cv::Mat */ -cv::Mat Normalize(cv::Mat& src, const std::vector& 
mean, const std::vector& std, - bool to_rgb, bool inplace = true); +MMDEPLOY_API cv::Mat Normalize(cv::Mat& src, const std::vector& mean, + const std::vector& std, bool to_rgb, bool inplace = true); /** * @brief tranpose an image, from {h, w, c} to {c, h, w} @@ -59,7 +60,7 @@ cv::Mat Normalize(cv::Mat& src, const std::vector& mean, const std::vecto * @param src input image * @return */ -cv::Mat Transpose(const cv::Mat& src); +MMDEPLOY_API cv::Mat Transpose(const cv::Mat& src); /** * @brief convert an image to another color space @@ -69,7 +70,8 @@ cv::Mat Transpose(const cv::Mat& src); * @param dst_format * @return */ -cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat dst_format); +MMDEPLOY_API cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, + PixelFormat dst_format); /** * @@ -82,8 +84,8 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds * @param val * @return */ -cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, int border_type, - float val); +MMDEPLOY_API cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, + int border_type, float val); /** * @brief compare two images @@ -92,7 +94,7 @@ cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, int bo * @param src2 the other input image * @return bool true means the images are the same */ -bool Compare(const cv::Mat& src1, const cv::Mat& src2); +MMDEPLOY_API bool Compare(const cv::Mat& src1, const cv::Mat& src2); } // namespace cpu @@ -104,8 +106,6 @@ struct IsCvPoint : std::false_type {}; template struct IsCvPoint<::cv::Point_> : std::true_type {}; -} // namespace detail - template >::value, int> = 0> void serialize(Archive&& archive, T&& p) { @@ -146,6 +146,8 @@ void load(Archive& archive, std::vector& v) { } } +} // namespace detail + } // namespace mmdeploy -#endif // MMDEPLOY_OPENCV_UTILS_H +#endif // MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ diff --git 
a/demo/csrc/CMakeLists.txt b/demo/csrc/CMakeLists.txt index 3da3b04b7..3e1bdcc6f 100644 --- a/demo/csrc/CMakeLists.txt +++ b/demo/csrc/CMakeLists.txt @@ -2,13 +2,18 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy-example) -find_package(OpenCV REQUIRED) find_package(MMDeploy REQUIRED) function(add_example name) - add_executable(${name} ${name}.cpp) - target_link_libraries(${name} ${MMDeploy_LIBS} -Wl,--disable-new-dtags - opencv_imgcodecs opencv_imgproc opencv_core) + file(GLOB _SRCS ${name}.c*) + add_executable(${name} ${_SRCS}) + if (NOT MSVC) + # disable new dtags so that executables can run even without LD_LIBRARY_PATH set + target_link_libraries(${name} PRIVATE -Wl,--disable-new-dtags) + endif () + mmdeploy_load_static(${name} MMDeployStaticModules) + mmdeploy_load_dynamic(${name} MMDeployDynamicModules) + target_link_libraries(${name} PRIVATE MMDeployLibs ${OpenCV_LIBS}) endfunction() add_example(image_classification) diff --git a/demo/csrc/image_classification.cpp b/demo/csrc/image_classification.cpp index be618659b..18d1e0793 100644 --- a/demo/csrc/image_classification.cpp +++ b/demo/csrc/image_classification.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include "classifier.h" diff --git a/demo/csrc/image_restorer.cpp b/demo/csrc/image_restorer.cpp index 4e462ce08..3984c88a8 100644 --- a/demo/csrc/image_restorer.cpp +++ b/demo/csrc/image_restorer.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. #include -#include +#include #include #include diff --git a/demo/csrc/image_segmentation.cpp b/demo/csrc/image_segmentation.cpp index 71b310815..8502ecec0 100644 --- a/demo/csrc/image_segmentation.cpp +++ b/demo/csrc/image_segmentation.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
#include -#include +#include #include #include #include @@ -13,7 +13,7 @@ using namespace std; vector gen_palette(int num_classes) { std::mt19937 gen; - std::uniform_int_distribution uniform_dist(0, 255); + std::uniform_int_distribution uniform_dist(0, 255); vector palette; palette.reserve(num_classes); diff --git a/demo/csrc/object_detection.cpp b/demo/csrc/object_detection.cpp index a57b4f41f..184340753 100644 --- a/demo/csrc/object_detection.cpp +++ b/demo/csrc/object_detection.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/demo/csrc/ocr.cpp b/demo/csrc/ocr.cpp index f82d1eca6..1bb8d43ef 100644 --- a/demo/csrc/ocr.cpp +++ b/demo/csrc/ocr.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/docs/en/build/linux.md b/docs/en/build/linux.md new file mode 100644 index 000000000..1333ed77b --- /dev/null +++ b/docs/en/build/linux.md @@ -0,0 +1 @@ +TODO diff --git a/docs/en/build/windows.md b/docs/en/build/windows.md new file mode 100644 index 000000000..1333ed77b --- /dev/null +++ b/docs/en/build/windows.md @@ -0,0 +1 @@ +TODO diff --git a/docs/zh_cn/build/linux.md b/docs/zh_cn/build/linux.md new file mode 100644 index 000000000..1333ed77b --- /dev/null +++ b/docs/zh_cn/build/linux.md @@ -0,0 +1 @@ +TODO diff --git a/docs/zh_cn/build/windows.md b/docs/zh_cn/build/windows.md new file mode 100644 index 000000000..e0bcdfbc3 --- /dev/null +++ b/docs/zh_cn/build/windows.md @@ -0,0 +1,336 @@ +- [Windows 下构建方式](#windows-下构建方式) + - [源码安装](#源码安装) + - [安装构建和编译工具链](#安装构建和编译工具链) + - [安装依赖包](#安装依赖包) + - [安装 MMDeploy Converter 依赖](#安装-mmdeploy-converter-依赖) + - [安装 MMDeploy SDK 依赖](#安装-mmdeploy-sdk-依赖) + - [安装推理引擎](#安装推理引擎) + - [编译 MMDeploy](#编译-mmdeploy) + - [编译安装 Model Converter](#编译安装-model-converter) + - [编译自定义算子](#编译自定义算子) + - [安装 Model Converter](#安装-model-converter) + - [编译 SDK](#编译-sdk) + - [编译选项说明](#编译选项说明) + - [编译样例](#编译样例) + - [编译 SDK Demo](#编译-sdk-demo) + - [注意事项](#注意事项) + +--- +# Windows 下构建方式 + +目前,MMDeploy 在 
Windows 平台下仅提供源码编译安装方式。未来会提供预编译包方式。 + +## 源码安装 +下述安装方式,均是在 **Windows 10** 下进行 +### 安装构建和编译工具链 +1. 下载并安装 [Visual Studio 2019](https://visualstudio.microsoft.com) 。安装时请勾选 "使用C++的桌面开发", "Windows 10 SDK"
+2. 把 cmake 路径加入到环境变量 PATH 中, "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin"
+3. 如果系统中配置了 NVIDIA 显卡,根据官网教程,下载并安装 cuda toolkit。
+### 安装依赖包 + +#### 安装 MMDeploy Converter 依赖 + + + + + + + + + + + + + + + + + + + + + +
名称 安装方法
conda 强烈建议安装conda,或者miniconda。比如,
https://repo.anaconda.com/miniconda/Miniconda3-py37_4.11.0-Windows-x86_64.exe
安装完毕后,打开系统开始菜单,输入prompt,选择并打开 anaconda powershell prompt。
下文中的安装命令均是在 anaconda powershell 中测试验证的。
pytorch
(>=1.8.0)
+ 参考pytorch官网,根据系统环境, 选择合适的预编译包进行安装。比如,
+

+    pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
+    
+
mmcv-full 参考mmcv官网,根据系统环境,选择预编译包进行安装。比如,
+

+    $env:cu_version="cu111"
+    $env:torch_version="torch1.8.0"
+    pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/$env:cu_version/$env:torch_version/index.html
+    
+
+ + +#### 安装 MMDeploy SDK 依赖 + + + + + + + + + + + + + + + + + + + + + + +
名称 安装方法
spdlog spdlog是一个精巧的日志管理库。请参考如下命令安装:
+ 1. 下载 https://github.com/gabime/spdlog/archive/refs/tags/v1.9.2.zip
+ 2. 解压后,进入到文件夹 spdlog-v1.9.2
+ 3. 执行编译安装命令
+

+    mkdir build
+    cd build
+    cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release
+    cmake --build . --target install -j --config Release
+    
+
OpenCV + 1. 下载并安装 OpenCV 在 windows 下的预编译包: https://github.com/opencv/opencv/releases/download/4.5.5/opencv-4.5.5-vc14_vc15.exe
+ 2. 把 OpenCV 库的路径加入到环境变量 PATH 中
pplcv pplcv 是在x86和cuda平台下的高性能图像处理库。 此依赖项为可选项,只有在cuda平台下,才需安装。而且,目前必须使用v0.6.2,且需要使用git clone的方式下载源码并编译安装
+

+    git clone --recursive git@github.com:openppl-public/ppl.cv.git
+    cd ppl.cv
+    git checkout tags/v0.6.2 -b v0.6.2
+    ./build.bat -G "Visual Studio 16 2019" -T v142 -A x64 -DHPCC_USE_CUDA=ON -DHPCC_MSVC_MD=ON
+    
+
+ + + +#### 安装推理引擎 +目前,在 Windows 平台下,MMDeploy 支持 ONNXRuntime 和 TensorRT 两种推理引擎。其他推理引擎尚未进行验证,或者验证未通过。后续将陆续予以支持 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
推理引擎 依赖包安装方法
ONNXRuntimeonnxruntime + 1. 下载二进制包:https://github.com/microsoft/onnxruntime/releases/download/v1.8.0/onnxruntime-win-x64-1.8.0.zip
+ 2. 解压到目标路径。我们使用 onnxruntime_dir 代表此路径
+ 3. 在 PATH 中增加 onnxruntime libs 路径, +

+    $env:path = "{onnxruntime_dir}/lib;" + $env:path
+    
+
TensorRT
TensorRT
+ 1. 从NVIDIA官网下载二进制包, 比如,
+ https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.3.0/zip/TensorRT-8.2.3.0.Windows10.x86_64.cuda-11.4.cudnn8.2.zip
+ 2. 解压二进制包到目标路径。我们使用 tensorrt_dir 代表此路径
+ 3. 安装 tensorrt 的 python package
+ 4. 在 PATH 中增加 tensorrt libs 路径 +

+   pip install {tensorrt_dir}/python/tensorrt-8.2.3.0-cp37-none-win_amd64.whl
+   $env:path = "{tensorrt_dir}/lib;" + $env:path
+   
+
cudnn + 1. 从NVIDIA官网下载二进制包, 比如,
+ https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.1.32/11.3_06072021/cudnn-11.3-windows-x64-v8.2.1.32.zip
+ 2. 解压二进制包到目标路径。我们使用 cudnn_dir 代表此路径
+ 3. 在 PATH 中增加 cudnn libs 路径 +

+   $env:path = "{cudnn_dir}/bin;" + $env:path
+   
+   
PPL.NNppl.nn TODO
OpenVINOopenvino TODO
ncnn ncnn TODO
+ +### 编译 MMDeploy + +#### 编译安装 Model Converter +##### 编译自定义算子 +- **ONNXRuntime** 自定义算子 +```powershell +mkdir build +cd build +cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="ort" -DONNXRUNTIME_DIR={onnxruntime_dir} +cmake --build . --config Release -- /maxcpucount:4 +``` + +- **TensorRT** 自定义算子 + +```powershell +mkdir build +cd build +cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="trt" -DTENSORRT_DIR={tensorrt_dir} -DCUDNN_DIR={cudnn_dir} +cmake --build . --config Release -- /maxcpucount:4 +``` + +- **ncnn** 自定义算子 + + TODO + +##### 安装 Model Converter +```powershell +cd root/path/of/MMDeploy +pip install -e . +``` +#### 编译 SDK +##### 编译选项说明 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
编译选项取值范围缺省值说明
MMDEPLOY_BUILD_SDK{ON, OFF}OFFMMDeploy SDK 编译开关
MMDEPLOY_BUILD_SDK_PYTHON_API{ON, OFF}OFFMMDeploy SDK python package的编译开关
MMDEPLOY_BUILD_TEST{ON, OFF}OFFMMDeploy SDK的测试程序编译开关
MMDEPLOY_TARGET_DEVICES{"cpu", "cuda"}cpu设置目标设备。当有多个设备时,设备名称之间使用分号隔开。 比如,-DMMDEPLOY_TARGET_DEVICES="cpu;cuda"
MMDEPLOY_TARGET_BACKENDS{"trt", "ort", "pplnn", "ncnn", "openvino"}N/A 默认情况下,SDK不设置任何后端, 因为它与应用场景高度相关。 当选择多个后端时, 中间使用分号隔开。比如,
-DMMDEPLOY_TARGET_BACKENDS="trt;ort;pplnn;ncnn;openvino"
+ 构建时,几乎每个后端,都需设置一些环境变量,用来查找依赖包。
+ 1. trt: 表示 TensorRT, 需要设置 TENSORRT_DIR 和 CUDNN_DIR。类似,
-DTENSORRT_DIR={tensorrt_dir}
-DCUDNN_DIR={cudnn_dir}
+ 2. ort: 表示 ONNXRuntime,需要设置 ONNXRUNTIME_DIR。类似,
-DONNXRUNTIME_DIR={onnxruntime_dir}
+ 3. pplnn: 表示 PPL.NN,需要设置 pplnn_DIR。当前版本尚未验证
+ 4. ncnn:需要设置 ncnn_DIR。当前版本尚未验证
+ 5. openvino: 表示 OpenVINO,需要设置 InferenceEngine_DIR。当前版本尚未验证通过 +
MMDEPLOY_CODEBASES{"mmcls", "mmdet", "mmseg", "mmedit", "mmocr", "all"}N/A用来设置SDK后处理组件,加载OpenMMLab算法仓库的后处理功能。已支持的算法仓库有'mmcls','mmdet','mmedit','mmseg'和'mmocr'。如果选择多个codebase,中间使用分号隔开。比如,-DMMDEPLOY_CODEBASES="mmcls;mmdet"。也可以通过 -DMMDEPLOY_CODEBASES=all 方式,加载所有codebase。
BUILD_SHARED_LIBS{ON, OFF}ON动态库的编译开关。设置OFF时,编译静态库
+ + +##### 编译样例 + +下文展示2个构建SDK的样例,分别用于不同的运行环境。 + +- cpu + ONNXRuntime + + ```PowerShell + mkdir build + cd build + cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 ` + -DMMDEPLOY_BUILD_SDK=ON ` + -DMMDEPLOY_TARGET_DEVICES="cpu" ` + -DMMDEPLOY_TARGET_BACKENDS="ort" ` + -DMMDEPLOY_CODEBASES="all" ` + -DONNXRUNTIME_DIR={onnxruntime_dir} ` + -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog ` + -DOpenCV_DIR={opencv_dir}/build + cmake --build . --config Release -- /maxcpucount:4 + cmake --install . --config Release + ``` + +- cuda + TensorRT + + ```PowerShell + mkdir build + cd build + cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 ` + -DMMDEPLOY_BUILD_SDK=ON ` + -DMMDEPLOY_TARGET_DEVICES="cuda" ` + -DMMDEPLOY_TARGET_BACKENDS="trt" ` + -DMMDEPLOY_CODEBASES="all" ` + -Dpplcv_DIR={pplcv_dir}/pplcv-build/install/lib/cmake/ppl ` + -DTENSORRT_DIR={tensorrt_dir} ` + -DCUDNN_DIR={cudnn_dir} ` + -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog ` + -DOpenCV_DIR={opencv_dir}/build + cmake --build . --config Release -- /maxcpucount:4 + cmake --install . --config Release + ``` +- 其他 + + 请参考上述两个示例,以及前述 SDK 的编译选项,在其他运行环境下编译 SDK + +##### 编译 SDK Demo + +```PowerShell +cd install/example +mkdir build +cd build +cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 ` + -DMMDeploy_DIR={mmdeploy_dir}/build/install/lib/cmake/MMDeploy ` + -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog ` + -DOpenCV_DIR={opencv_dir}/build +cmake --build . --config Release -- /maxcpucount:4 + +$env:path = "${mmdeploy_dir}/build/install/bin;" + $env:path + +``` + +### 注意事项 + 1. Release / Debug 库不能混用。MMDeploy要是编译Debug版本,所有第三方依赖都要是Debug版本。 diff --git a/mmdeploy/backend/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py index 97667defd..80cd871c5 100644 --- a/mmdeploy/backend/ncnn/init_plugins.py +++ b/mmdeploy/backend/ncnn/init_plugins.py @@ -1,7 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-import glob import os +from mmdeploy.utils import get_file_path + def get_ops_path() -> str: """Get NCNN custom ops library path. @@ -9,14 +10,11 @@ def get_ops_path() -> str: Returns: str: The library path of NCNN custom ops. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_ncnn_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_ncnn_ops.so', + '../../../build/bin/*/mmdeploy_ncnn_ops.pyd' + ] + return get_file_path(os.path.dirname(__file__), candidates) def get_onnx2ncnn_path() -> str: @@ -25,10 +23,7 @@ def get_onnx2ncnn_path() -> str: Returns: str: A path of onnx2ncnn tool. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), '../../../build/bin/onnx2ncnn')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/bin/onnx2ncnn', '../../../build/bin/*/onnx2ncnn' + ] + return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/onnxruntime/init_plugins.py b/mmdeploy/backend/onnxruntime/init_plugins.py index 06cd00150..e8622eedf 100644 --- a/mmdeploy/backend/onnxruntime/init_plugins.py +++ b/mmdeploy/backend/onnxruntime/init_plugins.py @@ -1,7 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. -import glob import os +from mmdeploy.utils import get_file_path + def get_ops_path() -> str: """Get the library path of onnxruntime custom ops. @@ -9,11 +10,8 @@ def get_ops_path() -> str: Returns: str: The library path to onnxruntime custom ops. 
""" - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_onnxruntime_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_onnxruntime_ops.so', + '../../../build/bin/*/mmdeploy_onnxruntime_ops.dll', + ] + return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/sdk/__init__.py b/mmdeploy/backend/sdk/__init__.py index 95a954850..ef648c4d5 100644 --- a/mmdeploy/backend/sdk/__init__.py +++ b/mmdeploy/backend/sdk/__init__.py @@ -3,14 +3,24 @@ import os import sys -lib_dir = os.path.abspath( - os.path.join(os.path.dirname(__file__), '../../../build/lib')) - -sys.path.insert(0, lib_dir) +from mmdeploy.utils import get_file_path _is_available = False -if importlib.util.find_spec('mmdeploy_python') is not None: +module_name = 'mmdeploy_python' + +candidates = [ + f'../../../build/lib/{module_name}.*.so', + f'../../../build/bin/*/{module_name}.*.pyd' +] + +lib_path = get_file_path(os.path.dirname(__file__), candidates) + +if lib_path: + lib_dir = os.path.dirname(lib_path) + sys.path.insert(0, lib_dir) + +if importlib.util.find_spec(module_name) is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True diff --git a/mmdeploy/backend/tensorrt/init_plugins.py b/mmdeploy/backend/tensorrt/init_plugins.py index 9bb0da7f4..80c6eea4d 100644 --- a/mmdeploy/backend/tensorrt/init_plugins.py +++ b/mmdeploy/backend/tensorrt/init_plugins.py @@ -1,9 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. import ctypes -import glob import os -from mmdeploy.utils import get_root_logger +from mmdeploy.utils import get_file_path, get_root_logger def get_ops_path() -> str: @@ -12,14 +11,11 @@ def get_ops_path() -> str: Returns: str: A path of the TensorRT plugin library. 
""" - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_tensorrt_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_tensorrt_ops.so', + '../../../build/bin/*/mmdeploy_tensorrt_ops.dll' + ] + return get_file_path(os.path.dirname(__file__), candidates) def load_tensorrt_plugin() -> bool: diff --git a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py index 903899b6e..e00ea7ef8 100644 --- a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py +++ b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py @@ -204,11 +204,7 @@ def forward(self, output = self.wrapper.invoke([img])[0] if test_mode: output = torch.from_numpy(output) - output = torch.permute(output, ( - 2, - 0, - 1, - )) + output = output.permute(2, 0, 1) output = output / 255. results = self.test_post_process([output], lq, gt) return results diff --git a/mmdeploy/utils/__init__.py b/mmdeploy/utils/__init__.py index 03543f9d5..b4b05bd07 100644 --- a/mmdeploy/utils/__init__.py +++ b/mmdeploy/utils/__init__.py @@ -8,7 +8,7 @@ is_dynamic_batch, is_dynamic_shape, load_config) from .constants import SDK_TASK_MAP, Backend, Codebase, Task from .device import parse_cuda_device_id, parse_device_id -from .utils import get_root_logger, target_wrapper +from .utils import get_file_path, get_root_logger, target_wrapper __all__ = [ 'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase', @@ -18,5 +18,5 @@ 'get_model_inputs', 'cfg_apply_marks', 'get_input_shape', 'parse_device_id', 'parse_cuda_device_id', 'get_codebase_config', 'get_backend_config', 'get_root_logger', 'get_dynamic_axes', - 'target_wrapper', 'SDK_TASK_MAP' + 'target_wrapper', 'SDK_TASK_MAP', 'get_file_path' ] diff --git a/mmdeploy/utils/utils.py b/mmdeploy/utils/utils.py index 9917dd477..47a5a18c5 100644 --- 
a/mmdeploy/utils/utils.py +++ b/mmdeploy/utils/utils.py @@ -1,5 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. +import glob import logging +import os import sys import traceback from typing import Callable, Optional @@ -56,3 +58,21 @@ def get_root_logger(log_file=None, log_level=logging.INFO) -> logging.Logger: name='mmdeploy', log_file=log_file, log_level=log_level) return logger + + +def get_file_path(prefix, candidates) -> str: + """Search for file in candidates. + + Args: + prefix (str): Prefix of the paths. + cancidates (str): Candidate paths + Returns: + str: file path or '' if not found + """ + for candidate in candidates: + wildcard = os.path.abspath(os.path.join(prefix, candidate)) + paths = glob.glob(wildcard) + if paths: + lib_path = paths[0] + return lib_path + return '' diff --git a/tests/test_csrc/CMakeLists.txt b/tests/test_csrc/CMakeLists.txt index 3ff7e2d15..34cc0349d 100644 --- a/tests/test_csrc/CMakeLists.txt +++ b/tests/test_csrc/CMakeLists.txt @@ -14,60 +14,63 @@ aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/net NET_TC) aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/model MODEL_TC) set(DEVICE_TC) -foreach(DEVICE IN LISTS MMDEPLOY_TARGET_DEVICES) - list(APPEND DEVICE_TC - ${CMAKE_CURRENT_SOURCE_DIR}/device/test_${DEVICE}_device.cpp) -endforeach() +foreach (DEVICE IN LISTS MMDEPLOY_TARGET_DEVICES) + list(APPEND DEVICE_TC + ${CMAKE_CURRENT_SOURCE_DIR}/device/test_${DEVICE}_device.cpp) +endforeach () set(CAPI_TC) -if("all" IN_LIST MMDEPLOY_CODEBASES) - set(TASK_LIST - "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model" - ) - set(CODEBASES "mmcls;mmdet;mmseg;mmedit;mmocr") -else() - set(TASK_LIST "model") - set(CODEBASES "${MMDEPLOY_CODEBASES}") - if("mmcls" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "classifier") - endif() - if("mmdet" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "detector") - endif() - if("mmseg" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "segmentor") - endif() - 
if("mmedit" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "restorer") - endif() - if("mmocr" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "text_detector") - list(APPEND TASK_LIST "text_recognizer") - endif() -endif() -foreach(TASK ${TASK_LIST}) - list(APPEND CAPI_TC ${CMAKE_CURRENT_SOURCE_DIR}/capi/test_${TASK}.cpp) -endforeach() +if ("all" IN_LIST MMDEPLOY_CODEBASES) + set(TASK_LIST + "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model" + ) + set(CODEBASES "mmcls;mmdet;mmseg;mmedit;mmocr") +else () + set(TASK_LIST "model") + set(CODEBASES "${MMDEPLOY_CODEBASES}") + if ("mmcls" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "classifier") + endif () + if ("mmdet" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "detector") + endif () + if ("mmseg" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "segmentor") + endif () + if ("mmedit" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "restorer") + endif () + if ("mmocr" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "text_detector") + list(APPEND TASK_LIST "text_recognizer") + endif () +endif () +foreach (TASK ${TASK_LIST}) + list(APPEND CAPI_TC ${CMAKE_CURRENT_SOURCE_DIR}/capi/test_${TASK}.cpp) +endforeach () # generate the header file configure_file(config/test_define.h.in - ${CMAKE_CURRENT_SOURCE_DIR}/test_define.h) + ${CMAKE_CURRENT_SOURCE_DIR}/test_define.h) set(TC_SRCS - ${TC_SRCS} - ${ARCHIVE_TC} - ${CORE_TC} - ${TRANSFORM_TC} - ${MODEL_TC} - ${NET_TC} - ${DEVICE_TC} - ${CAPI_TC}) + ${TC_SRCS} + ${ARCHIVE_TC} + ${CORE_TC} + ${TRANSFORM_TC} + ${MODEL_TC} + ${NET_TC} + ${DEVICE_TC} + ${CAPI_TC}) add_executable(mmdeploy_tests ${TC_SRCS}) target_include_directories(mmdeploy_tests - PRIVATE ${CMAKE_SOURCE_DIR}/third_party/catch2) + PRIVATE ${CMAKE_SOURCE_DIR}/third_party/catch2) target_include_directories(mmdeploy_tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries( - mmdeploy_tests PRIVATE ${MMDEPLOY_LIBS} ${OpenCV_LIBS} - -Wl,--no-as-needed 
${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-need - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive) + +mmdeploy_load_static(mmdeploy_tests MMDeployStaticModules) +mmdeploy_load_dynamic(mmdeploy_tests MMDeployDynamicModules) +target_link_libraries(mmdeploy_tests PRIVATE + MMDeployLibs + mmdeploy_transform + mmdeploy_opencv_utils) diff --git a/tests/test_csrc/archive/test_value_archive.cpp b/tests/test_csrc/archive/test_value_archive.cpp index 9e53f1226..f46316e35 100644 --- a/tests/test_csrc/archive/test_value_archive.cpp +++ b/tests/test_csrc/archive/test_value_archive.cpp @@ -1,5 +1,12 @@ // Copyright (c) OpenMMLab. All rights reserved. +// clang-format off + +#include "catch.hpp" + +// clang-format on + +#include #include #include #include @@ -8,10 +15,9 @@ #include #include #include -#include "core/utils/formatter.h" #include "archive/value_archive.h" -#include "catch.hpp" +#include "core/utils/formatter.h" // clang-format off @@ -41,8 +47,8 @@ TEMPLATE_LIST_TEST_CASE("test array-like for value", "[value]", ArrayLikeTypes) } TEST_CASE("test native array for value archive", "[value1]") { - const int a[10] = {0,1,2,3,4,5,6,7,8,9}; - int b[10] = {0,0,0,0,0,0,0,0,0,0}; + const int a[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + int b[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; mmdeploy::Value value; mmdeploy::ValueOutputArchive oa(value); oa(a); diff --git a/tests/test_csrc/capi/test_classifier.cpp b/tests/test_csrc/capi/test_classifier.cpp index 5e0b0f413..1b4ff1bbf 100644 --- a/tests/test_csrc/capi/test_classifier.cpp +++ b/tests/test_csrc/capi/test_classifier.cpp @@ -33,11 +33,11 @@ TEST_CASE("test classifier's c api", "[classifier]") { ret = mmdeploy_classifier_apply(handle, mats.data(), (int)mats.size(), &results, &result_count); REQUIRE(ret == MM_SUCCESS); auto result_ptr = results; - INFO("model_path: {}", model_path); + MMDEPLOY_INFO("model_path: {}", model_path); for (auto i = 0; i < (int)mats.size(); ++i) { - INFO("the {}-th classification result: ", 
i); + MMDEPLOY_INFO("the {}-th classification result: ", i); for (int j = 0; j < *result_count; ++j, ++result_ptr) { - INFO("\t label: {}, score: {}", result_ptr->label_id, result_ptr->score); + MMDEPLOY_INFO("\t label: {}, score: {}", result_ptr->label_id, result_ptr->score); } } @@ -46,12 +46,12 @@ TEST_CASE("test classifier's c api", "[classifier]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmcls/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmcls"} / "images"); REQUIRE(!img_lists.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmcls/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmcls/"} / backend); REQUIRE(!model_list.empty()); for (auto& model_path : model_list) { for (auto& device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_detector.cpp b/tests/test_csrc/capi/test_detector.cpp index f7a72e541..a801d352a 100644 --- a/tests/test_csrc/capi/test_detector.cpp +++ b/tests/test_csrc/capi/test_detector.cpp @@ -6,12 +6,13 @@ #include "apis/c/detector.h" #include "core/logger.h" +#include "core/utils/formatter.h" #include "opencv2/opencv.hpp" #include "test_resource.h" - using namespace std; TEST_CASE("test detector's c api", "[detector]") { + MMDEPLOY_INFO("test detector"); auto test = [](const string &device, const string &model_path, const vector &img_list) { mm_handle_t handle{nullptr}; auto ret = mmdeploy_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); @@ -32,26 +33,30 @@ TEST_CASE("test detector's c api", "[detector]") { REQUIRE(ret == MM_SUCCESS); auto result_ptr = results; for (auto i = 0; i < mats.size(); ++i) { - INFO("the '{}-th' image has '{}' objects", i, result_count[i]); + MMDEPLOY_INFO("the '{}-th' image has '{}' objects", i, result_count[i]); for (auto j = 0; j < 
result_count[i]; ++j, ++result_ptr) { auto &bbox = result_ptr->bbox; - INFO(" >> bbox[{}, {}, {}, {}], label_id {}, score {}", bbox.left, bbox.top, bbox.right, - bbox.bottom, result_ptr->label_id, result_ptr->score); + MMDEPLOY_INFO(" >> bbox[{}, {}, {}, {}], label_id {}, score {}", bbox.left, bbox.top, + bbox.right, bbox.bottom, result_ptr->label_id, result_ptr->score); } } mmdeploy_detector_release_result(results, result_count, (int)mats.size()); mmdeploy_detector_destroy(handle); }; - - auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmdet/images"); + MMDEPLOY_INFO("get test resources"); + auto &gResources = MMDeployTestResources::Get(); + MMDEPLOY_INFO("locate image resources"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmdet"} / "images"); + MMDEPLOY_INFO("{}", img_lists.size()); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { + MMDEPLOY_INFO("backend: {}", backend); DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmdet/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmdet"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) { + MMDEPLOY_INFO("model: {}", model_path); for (auto &device_name : gResources.device_names(backend)) { test(device_name, model_path, img_lists); } diff --git a/tests/test_csrc/capi/test_model.cpp b/tests/test_csrc/capi/test_model.cpp index af0a98362..d9bab881f 100644 --- a/tests/test_csrc/capi/test_model.cpp +++ b/tests/test_csrc/capi/test_model.cpp @@ -12,7 +12,7 @@ TEST_CASE("test model c capi", "[model]") { std::string model_path; for (auto const &codebase : gResource.codebases()) { for (auto const &backend : gResource.backends()) { - if (auto _model_list = gResource.LocateModelResources(codebase + "/" + backend); + if (auto _model_list = gResource.LocateModelResources(fs::path{codebase} / backend); !_model_list.empty()) { model_path 
= _model_list.front(); break; diff --git a/tests/test_csrc/capi/test_restorer.cpp b/tests/test_csrc/capi/test_restorer.cpp index 502d37702..4e5653717 100644 --- a/tests/test_csrc/capi/test_restorer.cpp +++ b/tests/test_csrc/capi/test_restorer.cpp @@ -40,12 +40,12 @@ TEST_CASE("test restorer's c api", "[restorer]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmedit/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmedit"} / "images"); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmedit/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmedit"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) { for (auto &device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_segmentor.cpp b/tests/test_csrc/capi/test_segmentor.cpp index b042d793c..6de6150bf 100644 --- a/tests/test_csrc/capi/test_segmentor.cpp +++ b/tests/test_csrc/capi/test_segmentor.cpp @@ -43,12 +43,12 @@ TEST_CASE("test segmentor's c api", "[segmentor]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmseg/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmseg"} / "images"); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmseg/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmseg"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) { for (auto &device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_text_detector.cpp b/tests/test_csrc/capi/test_text_detector.cpp index a2bdd8449..af12d14e0 100644 --- 
a/tests/test_csrc/capi/test_text_detector.cpp +++ b/tests/test_csrc/capi/test_text_detector.cpp @@ -34,12 +34,12 @@ TEST_CASE("test text detector's c api", "[text-detector]") { auto result_ptr = results; for (auto i = 0; i < mats.size(); ++i) { - INFO("the {}-th image has '{}' objects", i, result_count[i]); + MMDEPLOY_INFO("the {}-th image has '{}' objects", i, result_count[i]); for (auto j = 0; j < result_count[i]; ++j, ++result_ptr) { auto& bbox = result_ptr->bbox; - INFO(">> bbox[{}].score: {}, coordinate: ", i, result_ptr->score); + MMDEPLOY_INFO(">> bbox[{}].score: {}, coordinate: ", i, result_ptr->score); for (auto& _bbox : result_ptr->bbox) { - INFO(">> >> ({}, {})", _bbox.x, _bbox.y); + MMDEPLOY_INFO(">> >> ({}, {})", _bbox.x, _bbox.y); } } } @@ -49,12 +49,12 @@ TEST_CASE("test text detector's c api", "[text-detector]") { }; auto& gResources = MMDeployTestResources::Get(); - auto img_list = gResources.LocateImageResources("mmocr/images"); + auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images"); REQUIRE(!img_list.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmocr/textdet/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmocr"} / "textdet" / "backend"); REQUIRE(!model_list.empty()); for (auto& model_path : model_list) { for (auto& device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_text_recognizer.cpp b/tests/test_csrc/capi/test_text_recognizer.cpp index 94f01063d..3265c4b0e 100644 --- a/tests/test_csrc/capi/test_text_recognizer.cpp +++ b/tests/test_csrc/capi/test_text_recognizer.cpp @@ -35,7 +35,7 @@ TEST_CASE("test text recognizer's c api", "[text-recognizer]") { for (auto i = 0; i < mats.size(); ++i) { std::vector score(results[i].score, results[i].score + results[i].length); - INFO("image {}, text = {}, score = {}", i, results[i].text, score); + 
MMDEPLOY_INFO("image {}, text = {}, score = {}", i, results[i].text, score); } mmdeploy_text_recognizer_release_result(results, (int)mats.size()); @@ -43,12 +43,12 @@ TEST_CASE("test text recognizer's c api", "[text-recognizer]") { }; auto& gResources = MMDeployTestResources::Get(); - auto img_list = gResources.LocateImageResources("mmocr/images"); + auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images"); REQUIRE(!img_list.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmocr/textreg/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmocr"} / "textreg" / "backend"); REQUIRE(!model_list.empty()); for (auto& model_path : model_list) { for (auto& device_name : gResources.device_names(backend)) { @@ -93,7 +93,7 @@ TEST_CASE("test text detector-recognizer combo", "[text-detector-recognizer]") { for (int j = 0; j < bbox_count[i]; ++j) { auto& text = texts[offset + j]; std::vector score(text.score, text.score + text.length); - INFO("image {}, text = {}, score = {}", i, text.text, score); + MMDEPLOY_INFO("image {}, text = {}, score = {}", i, text.text, score); } offset += bbox_count[i]; } @@ -106,13 +106,15 @@ TEST_CASE("test text detector-recognizer combo", "[text-detector-recognizer]") { }; auto& gResources = MMDeployTestResources::Get(); - auto img_list = gResources.LocateImageResources("mmocr/images"); + auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images"); REQUIRE(!img_list.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto det_model_list = gResources.LocateModelResources("/mmocr/textdet/" + backend); - auto reg_model_list = gResources.LocateModelResources("/mmocr/textreg/" + backend); + auto det_model_list = + gResources.LocateModelResources(fs::path{"mmocr"} / "textdet" / backend); + auto reg_model_list = + 
gResources.LocateModelResources(fs::path{"mmocr"} / "textreg" / backend); REQUIRE(!det_model_list.empty()); REQUIRE(!reg_model_list.empty()); auto det_model_path = det_model_list.front(); diff --git a/tests/test_csrc/core/test_mat.cpp b/tests/test_csrc/core/test_mat.cpp index b1ae27cb3..bb3e1a884 100644 --- a/tests/test_csrc/core/test_mat.cpp +++ b/tests/test_csrc/core/test_mat.cpp @@ -1,6 +1,8 @@ // Copyright (c) OpenMMLab. All rights reserved. +#include #include +#include #include "catch.hpp" #include "core/logger.h" diff --git a/tests/test_csrc/core/test_status_code.cpp b/tests/test_csrc/core/test_status_code.cpp index 33059862d..1316a0795 100644 --- a/tests/test_csrc/core/test_status_code.cpp +++ b/tests/test_csrc/core/test_status_code.cpp @@ -26,13 +26,13 @@ TEST_CASE("test status_code", "[status_code]") { sqrt_of_negative().value(); } catch (const Exception& e) { REQUIRE(e.code() == eInvalidArgument); - INFO("{}", e.what()); + MMDEPLOY_INFO("{}", e.what()); } auto r = sqrt_of_negative(); REQUIRE(!r); REQUIRE(r.error() == eInvalidArgument); - INFO("{}", r.error().message().c_str()); + MMDEPLOY_INFO("{}", r.error().message().c_str()); } } // namespace mmdeploy diff --git a/tests/test_csrc/core/test_token.cpp b/tests/test_csrc/core/test_token.cpp deleted file mode 100644 index de2e0f0cb..000000000 --- a/tests/test_csrc/core/test_token.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) OpenMMLab. All rights reserved. 
- -#include - -#include "catch.hpp" -#include "experimental/collection.h" - -namespace token { - -using namespace mmdeploy::token; - -using batch_size = mmdeploy::Token; -using type = mmdeploy::Token; -using name = mmdeploy::Token; - -} // namespace token - -TEST_CASE("test token", "[token]") { - using namespace mmdeploy::token; - using mmdeploy::Collection; - - auto produce = [] { - Collection c; - c << token::batch_size{64} << token::type{"Resize"} << token::name("resize1"); - return c; - }; - - auto c = produce(); - - auto consume = [](token::batch_size b, token::type t) { - std::cout << b.key() << ": " << *b << "\n" << t.key() << ": " << *t << "\n"; - return std::string{"success"}; - }; - - (void)Apply(consume, c); -} diff --git a/tests/test_csrc/core/test_value.cpp b/tests/test_csrc/core/test_value.cpp index 07bfe6d7f..0ecc1c629 100644 --- a/tests/test_csrc/core/test_value.cpp +++ b/tests/test_csrc/core/test_value.cpp @@ -219,7 +219,7 @@ TEST_CASE("test pointer of Value", "[value]") { REQUIRE(p["object"].is_object()); REQUIRE(p["array"].is_array()); REQUIRE(p["array"].is_array()); - INFO("{}", p); + MMDEPLOY_INFO("{}", p); } TEST_CASE("test null Value", "[value]") { @@ -332,7 +332,7 @@ TEST_CASE("test speed of value", "[value]") { } auto t1 = std::chrono::high_resolution_clock::now(); auto dt = std::chrono::duration(t1 - t0).count(); - INFO("time = {}ms", (float)dt); + MMDEPLOY_INFO("time = {}ms", (float)dt); } TEST_CASE("test ctor of value", "[value]") { diff --git a/tests/test_csrc/device/test_cpu_device.cpp b/tests/test_csrc/device/test_cpu_device.cpp index 88164014c..3109f6cd4 100644 --- a/tests/test_csrc/device/test_cpu_device.cpp +++ b/tests/test_csrc/device/test_cpu_device.cpp @@ -10,93 +10,6 @@ using namespace mmdeploy; using namespace std::string_literals; -namespace mmdeploy { -Kernel CreateCpuKernel(std::function task); -} - -TEST_CASE("basic device", "[device]") { - Platform platform("cpu"); - REQUIRE(platform.GetPlatformName() == "cpu"s); - 
REQUIRE(platform.GetPlatformId() == 0); - - const Device host("cpu"); - Stream stream(host); - // REQUIRE(platform.CreateStream("cpu", &stream) == 0); - REQUIRE(stream); - - SECTION("basic stream") { - bool set_me{}; - auto kernel = CreateCpuKernel([&] { set_me = true; }); - REQUIRE(kernel); - REQUIRE(stream.Submit(kernel)); - REQUIRE(stream.Wait()); - REQUIRE(set_me); - } - - SECTION("recursive task") { - auto outer_loop = CreateCpuKernel([&] { - for (int i = 0; i < 10; ++i) { - auto inner_loop = CreateCpuKernel([&, i] { - for (int j = 0; j < 10; ++j) { - std::cerr << "(" << i << ", " << j << ") "; - } - std::cerr << "\n"; - }); - REQUIRE(stream.Submit(inner_loop)); - } - }); - REQUIRE(stream.Submit(outer_loop)); - REQUIRE(stream.Wait()); - } - - SECTION("basic event") { - Event event(host); - // REQUIRE(platform.CreateEvent("cpu", &event) == 0); - REQUIRE(event); - auto sleeping = CreateCpuKernel([&] { - std::cerr << "start sleeping\n"; - for (int i = 0; i < 5; ++i) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - std::cerr << "0.1 second passed.\n"; - } - std::cerr << "time's up, waking up.\n"; - }); - for (int i = 0; i < 2; ++i) { - REQUIRE(stream.Submit(sleeping)); - REQUIRE(event.Record(stream)); - REQUIRE(event.Wait()); - std::cerr << "waked up.\n"; - } - } - - SECTION("event on stream") { - const int N = 10; - std::vector streams; - streams.reserve(N); - for (int i = 0; i < N; ++i) { - streams.emplace_back(host); - } - std::vector events; - events.reserve(N); - for (int i = 0; i < N; ++i) { - events.emplace_back(host); - } - for (int i = 0; i < N; ++i) { - auto kernel = CreateCpuKernel([&, i] { - std::cerr << "greatings from stream " << i << ".\n"; - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - std::cerr << "0.1 second passed, goodbye.\n"; - }); - if (i) { - REQUIRE(streams[i].DependsOn(events[i - 1])); - } - REQUIRE(streams[i].Submit(kernel)); - REQUIRE(events[i].Record(streams[i])); - } - REQUIRE(events.back().Wait()); 
- } -} - TEST_CASE("test buffer", "[buffer]") { using namespace mmdeploy; Device device{"cpu"}; diff --git a/tests/test_csrc/model/test_directory_model.cpp b/tests/test_csrc/model/test_directory_model.cpp index 6ea1bacc9..50091383a 100644 --- a/tests/test_csrc/model/test_directory_model.cpp +++ b/tests/test_csrc/model/test_directory_model.cpp @@ -24,14 +24,14 @@ TEST_CASE("test directory model", "[model]") { REQUIRE(!directory_model_list.empty()); auto model_dir = "sdk_models/good_model"; REQUIRE(gResource.IsDir(model_dir)); - auto model_path = gResource.resource_root_path() + "/" + model_dir; - REQUIRE(!model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / model_dir; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(!model_impl->ReadFile("deploy.json").has_error()); REQUIRE(model_impl->ReadFile("not-existing-file").has_error()); model_dir = "sdk_models/bad_model"; REQUIRE(gResource.IsDir(model_dir)); - model_path = gResource.resource_root_path() + "/" + model_dir; - REQUIRE(!model_impl->Init(model_path).has_error()); + model_path = gResource.resource_root_path() / model_dir; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(model_impl->ReadMeta().has_error()); } diff --git a/tests/test_csrc/model/test_model.cpp b/tests/test_csrc/model/test_model.cpp index b00f8c2b5..34bd4c984 100644 --- a/tests/test_csrc/model/test_model.cpp +++ b/tests/test_csrc/model/test_model.cpp @@ -24,7 +24,8 @@ TEST_CASE("model constructor", "[model]") { TEST_CASE("model init", "[model]") { auto& gResource = MMDeployTestResources::Get(); for (auto& codebase : gResource.codebases()) { - if (auto img_list = gResource.LocateImageResources(codebase + "/images"); !img_list.empty()) { + if (auto img_list = gResource.LocateImageResources(fs::path{codebase} / "images"); + !img_list.empty()) { Model model; REQUIRE(model.Init(img_list.front()).has_error()); break; @@ -32,7 +33,7 @@ TEST_CASE("model init", "[model]") { 
} for (auto& codebase : gResource.codebases()) { for (auto& backend : gResource.backends()) { - if (auto model_list = gResource.LocateModelResources(codebase + "/" + backend); + if (auto model_list = gResource.LocateModelResources(fs::path{codebase} / backend); !model_list.empty()) { Model model; REQUIRE(!model.Init(model_list.front()).has_error()); diff --git a/tests/test_csrc/model/test_zip_model.cpp b/tests/test_csrc/model/test_zip_model.cpp index 48f787bde..8d5cb9ca7 100644 --- a/tests/test_csrc/model/test_zip_model.cpp +++ b/tests/test_csrc/model/test_zip_model.cpp @@ -25,10 +25,10 @@ TEST_CASE("test zip model", "[zip_model]") { auto& gResource = MMDeployTestResources::Get(); SECTION("bad sdk model") { - auto zip_model_path = "sdk_models/not_zip_file"; + auto zip_model_path = fs::path{"sdk_models"} / "not_zip_file"; REQUIRE(gResource.IsFile(zip_model_path)); - auto model_path = gResource.resource_root_path() + "/" + zip_model_path; - REQUIRE(model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / zip_model_path; + REQUIRE(model_impl->Init(model_path.string()).has_error()); } SECTION("bad zip buffer") { std::vector buffer(100); @@ -36,10 +36,10 @@ TEST_CASE("test zip model", "[zip_model]") { } SECTION("good sdk model") { - auto zip_model_path = "sdk_models/good_model.zip"; + auto zip_model_path = fs::path{"sdk_models"} / "good_model.zip"; REQUIRE(gResource.IsFile(zip_model_path)); - auto model_path = gResource.resource_root_path() + "/" + zip_model_path; - REQUIRE(!model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / zip_model_path; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(!model_impl->ReadFile("deploy.json").has_error()); REQUIRE(model_impl->ReadFile("not-exist-file").has_error()); REQUIRE(!model_impl->ReadMeta().has_error()); diff --git a/tests/test_csrc/net/test_ncnn_net.cpp b/tests/test_csrc/net/test_ncnn_net.cpp index 98b348c19..b55280c04 100644 
--- a/tests/test_csrc/net/test_ncnn_net.cpp +++ b/tests/test_csrc/net/test_ncnn_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test ncnn net", "[ncnn_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/ncnn"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ncnn"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_openvino_net.cpp b/tests/test_csrc/net/test_openvino_net.cpp index f4a2f683f..c3f82eb61 100644 --- a/tests/test_csrc/net/test_openvino_net.cpp +++ b/tests/test_csrc/net/test_openvino_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test openvino net", "[openvino_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/openvino"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "openvino"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_ort_net.cpp b/tests/test_csrc/net/test_ort_net.cpp index 506fbaf19..116221000 100644 --- a/tests/test_csrc/net/test_ort_net.cpp +++ b/tests/test_csrc/net/test_ort_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test ort net", "[ort_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/ort"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ort"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_ppl_net.cpp b/tests/test_csrc/net/test_ppl_net.cpp index 64a6a478a..b5d34a8ab 100644 --- a/tests/test_csrc/net/test_ppl_net.cpp +++ b/tests/test_csrc/net/test_ppl_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test pplnn net", "[ppl_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/pplnn"); + auto model_list 
= gResource.LocateModelResources(fs::path{"mmcls"} / "pplnn"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_trt_net.cpp b/tests/test_csrc/net/test_trt_net.cpp index 2b2841d77..c1c579b2c 100644 --- a/tests/test_csrc/net/test_trt_net.cpp +++ b/tests/test_csrc/net/test_trt_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test trt net", "[trt_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/trt"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "trt"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/preprocess/test_compose.cpp b/tests/test_csrc/preprocess/test_compose.cpp index 9b7cd4d8d..97e8ea452 100644 --- a/tests/test_csrc/preprocess/test_compose.cpp +++ b/tests/test_csrc/preprocess/test_compose.cpp @@ -11,7 +11,7 @@ #include "core/registry.h" #include "core/utils/formatter.h" #include "json.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_crop.cpp b/tests/test_csrc/preprocess/test_crop.cpp index 836e52726..b5958b421 100644 --- a/tests/test_csrc/preprocess/test_crop.cpp +++ b/tests/test_csrc/preprocess/test_crop.cpp @@ -4,7 +4,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_image2tensor.cpp b/tests/test_csrc/preprocess/test_image2tensor.cpp index 13de3e341..16939a09b 100644 --- a/tests/test_csrc/preprocess/test_image2tensor.cpp +++ b/tests/test_csrc/preprocess/test_image2tensor.cpp @@ -2,7 +2,7 @@ #include "catch.hpp" #include "core/tensor.h" #include "core/utils/device_utils.h" -#include 
"preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_load.cpp b/tests/test_csrc/preprocess/test_load.cpp index fa7ef5867..47abe91b3 100644 --- a/tests/test_csrc/preprocess/test_load.cpp +++ b/tests/test_csrc/preprocess/test_load.cpp @@ -4,7 +4,7 @@ #include "core/mat.h" #include "core/tensor.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_normalize.cpp b/tests/test_csrc/preprocess/test_normalize.cpp index 647203c02..bf96f55b2 100644 --- a/tests/test_csrc/preprocess/test_normalize.cpp +++ b/tests/test_csrc/preprocess/test_normalize.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_pad.cpp b/tests/test_csrc/preprocess/test_pad.cpp index 3f1608b3b..338be4bba 100644 --- a/tests/test_csrc/preprocess/test_pad.cpp +++ b/tests/test_csrc/preprocess/test_pad.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_resize.cpp b/tests/test_csrc/preprocess/test_resize.cpp index 8c63d5a19..e5143f309 100644 --- a/tests/test_csrc/preprocess/test_resize.cpp +++ b/tests/test_csrc/preprocess/test_resize.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" 
+#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/test_resource.h b/tests/test_csrc/test_resource.h index 11fbd034e..f59c79bf5 100644 --- a/tests/test_csrc/test_resource.h +++ b/tests/test_csrc/test_resource.h @@ -5,21 +5,13 @@ #include #include #include +#include #include #include +#include "core/utils/filesystem.h" #include "test_define.h" -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else - -#include - -namespace fs = std::experimental::filesystem; -#endif - using namespace std; class MMDeployTestResources { @@ -35,51 +27,51 @@ class MMDeployTestResources { } const std::vector &backends() const { return backends_; } const std::vector &codebases() const { return codebases_; } - const std::string &resource_root_path() const { return resource_root_path_; } + const fs::path &resource_root_path() const { return resource_root_path_; } bool HasDevice(const std::string &name) const { return std::any_of(devices_.begin(), devices_.end(), [&](const std::string &device_name) { return device_name == name; }); } - bool IsDir(const std::string &dir_name) const { - fs::path path{resource_root_path_ + "/" + dir_name}; + bool IsDir(const fs::path &dir_name) const { + auto path = resource_root_path_ / dir_name; return fs::is_directory(path); } - bool IsFile(const std::string &file_name) const { - fs::path path{resource_root_path_ + "/" + file_name}; + bool IsFile(const fs::path &file_name) const { + auto path = resource_root_path_ / file_name; return fs::is_regular_file(path); } public: - std::vector LocateModelResources(const std::string &sdk_model_zoo_dir) { + std::vector LocateModelResources(const fs::path &sdk_model_zoo_dir) { std::vector sdk_model_list; if (resource_root_path_.empty()) { return sdk_model_list; } - fs::path path{resource_root_path_ + "/" + sdk_model_zoo_dir}; + auto path = resource_root_path_ / sdk_model_zoo_dir; if (!fs::is_directory(path)) { 
return sdk_model_list; } for (auto const &dir_entry : fs::directory_iterator{path}) { fs::directory_entry entry{dir_entry.path()}; if (auto const &_path = dir_entry.path(); fs::is_directory(_path)) { - sdk_model_list.push_back(dir_entry.path()); + sdk_model_list.push_back(dir_entry.path().string()); } } return sdk_model_list; } - std::vector LocateImageResources(const std::string &img_dir) { + std::vector LocateImageResources(const fs::path &img_dir) { std::vector img_list; if (resource_root_path_.empty()) { return img_list; } - fs::path path{resource_root_path_ + "/" + img_dir}; + auto path = resource_root_path_ / img_dir; if (!fs::is_directory(path)) { return img_list; } @@ -122,15 +114,17 @@ class MMDeployTestResources { return result; } - std::string LocateResourceRootPath(const fs::path &cur_path, int max_depth) { + fs::path LocateResourceRootPath(const fs::path &cur_path, int max_depth) { if (max_depth < 0) { return ""; } for (auto const &dir_entry : fs::directory_iterator{cur_path}) { fs::directory_entry entry{dir_entry.path()}; auto const &_path = dir_entry.path(); - if (fs::is_directory(_path) && _path.filename() == "mmdeploy_test_resources") { - return _path.string(); + // filename must be checked before fs::is_directory, the latter will throw + // when _path points to a system file on Windows + if (_path.filename() == "mmdeploy_test_resources" && fs::is_directory(_path)) { + return _path; } } // Didn't find 'mmdeploy_test_resources' in current directory. @@ -143,7 +137,8 @@ class MMDeployTestResources { std::vector backends_; std::vector codebases_; std::map> backend_devices_; - std::string resource_root_path_; + fs::path resource_root_path_; + // std::string resource_root_path_; }; #endif // MMDEPLOY_TEST_RESOURCE_H